diff --git a/Wire.go b/Wire.go index e3da203bd6..2c54846243 100644 --- a/Wire.go +++ b/Wire.go @@ -154,6 +154,7 @@ import ( "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs" repository7 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" "github.com/devtron-labs/devtron/pkg/notifier" + "github.com/devtron-labs/devtron/pkg/overview" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/draftAwareConfigService" "github.com/devtron-labs/devtron/pkg/pipeline/executors" @@ -984,6 +985,14 @@ func InitializeApp() (*App, error) { acdConfig.NewArgoCDConfigGetter, wire.Bind(new(acdConfig.ArgoCDConfigGetter), new(*acdConfig.ArgoCDConfigGetterImpl)), + + // overview starts + overview.OverviewWireSet, + restHandler.NewOverviewRestHandlerImpl, + wire.Bind(new(restHandler.OverviewRestHandler), new(*restHandler.OverviewRestHandlerImpl)), + + router.NewOverviewRouterImpl, + wire.Bind(new(router.OverviewRouter), new(*router.OverviewRouterImpl)), ) return &App{}, nil } diff --git a/api/restHandler/ImageScanRestHandler.go b/api/restHandler/ImageScanRestHandler.go index e28c286cee..919369d291 100644 --- a/api/restHandler/ImageScanRestHandler.go +++ b/api/restHandler/ImageScanRestHandler.go @@ -19,13 +19,16 @@ package restHandler import ( "encoding/json" "fmt" + "net/http" + "strconv" + "github.com/devtron-labs/devtron/pkg/cluster/environment" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning" securityBean "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/bean" security2 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository" + "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean" "github.com/devtron-labs/devtron/util/sliceUtil" - "net/http" - "strconv" + "go.opentelemetry.io/otel" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/internal/util" @@ -46,6 +49,8 @@ type ImageScanRestHandler interface { FetchExecutionDetail(w http.ResponseWriter, r *http.Request) FetchMinScanResultByAppIdAndEnvId(w http.ResponseWriter, r *http.Request) VulnerabilityExposure(w http.ResponseWriter, r *http.Request) + VulnerabilitySummary(w http.ResponseWriter, r *http.Request) + VulnerabilityListing(w http.ResponseWriter, r *http.Request) } type ImageScanRestHandlerImpl struct { @@ -402,3 +407,229 @@ func (impl ImageScanRestHandlerImpl) VulnerabilityExposure(w http.ResponseWriter results.VulnerabilityExposure = vulnerabilityExposure common.WriteJsonResp(w, err, results, http.StatusOK) } + +func (impl ImageScanRestHandlerImpl) VulnerabilitySummary(w http.ResponseWriter, r *http.Request) { + ctx, span := otel.Tracer("imageScanRestHandler").Start(r.Context(), "VulnerabilitySummary") + defer span.End() + + userId, err := impl.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.HandleUnauthorized(w, r) + return + } + + // Parse request body with filters + decoder := json.NewDecoder(r.Body) + var summaryRequest *securityBean.VulnerabilitySummaryRequest + err = decoder.Decode(&summaryRequest) + if err != nil { + impl.logger.Errorw("request err, VulnerabilitySummary", "err", err, "payload", r.Body) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Create ImageScanRequest with filters for fetching deploy info + request := &securityBean.ImageScanRequest{ + ImageScanFilter: bean.ImageScanFilter{ + EnvironmentIds: summaryRequest.EnvironmentIds, + 
ClusterIds: summaryRequest.ClusterIds, + }, + } + + deployInfoList, err := impl.imageScanService.FetchAllDeployInfo(request) + if err != nil { + impl.logger.Errorw("service err, VulnerabilitySummary", "err", err) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, nil, newEmptyVulnerabilitySummary(), http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + + filteredDeployInfoList, err := impl.imageScanService.FilterDeployInfoByScannedArtifactsDeployedInEnv(deployInfoList) + if err != nil { + impl.logger.Errorw("service err, FilterDeployInfoByScannedArtifactsDeployedInEnv", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + _, rbacSpan := otel.Tracer("imageScanRestHandler").Start(ctx, "RBACProcessing") + token := r.Header.Get("token") + isSuperAdmin := false + if ok := impl.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*"); ok { + isSuperAdmin = true + } + var ids []int + if isSuperAdmin { + ids = sliceUtil.NewSliceFromFuncExec(filteredDeployInfoList, func(item *security2.ImageScanDeployInfo) int { + return item.Id + }) + } else { + ids, err = impl.getAuthorisedImageScanDeployInfoIds(token, filteredDeployInfoList) + if err != nil { + impl.logger.Errorw("error in getting authorised image scan deploy info ids", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + } + rbacSpan.End() + + if len(ids) == 0 { + common.WriteJsonResp(w, nil, newEmptyVulnerabilitySummary(), http.StatusOK) + return + } + + summary, err := impl.imageScanService.FetchVulnerabilitySummary(ctx, summaryRequest, ids) + if err != nil { + impl.logger.Errorw("service err, VulnerabilitySummary", "err", err) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, nil, newEmptyVulnerabilitySummary(), http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + common.WriteJsonResp(w, err, summary, http.StatusOK) +} + +// newEmptyVulnerabilitySummary returns a zero-count summary so handlers can respond 200 OK when no data exists. +func newEmptyVulnerabilitySummary() *securityBean.VulnerabilitySummary { + return &securityBean.VulnerabilitySummary{ + SeverityCount: &securityBean.SeverityCount{}, + } +} + +func (impl ImageScanRestHandlerImpl) VulnerabilityListing(w http.ResponseWriter, r *http.Request) { + ctx, span := otel.Tracer("imageScanRestHandler").Start(r.Context(), "VulnerabilityListing") + defer span.End() + + userId, err := impl.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.HandleUnauthorized(w, r) + return + } + + // Parse request body + decoder := json.NewDecoder(r.Body) + var request *securityBean.VulnerabilityListingRequest + err = decoder.Decode(&request) + if err != nil { + impl.logger.Errorw("request err, VulnerabilityListing", "err", err, "payload", r.Body) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Fetch all deploy info to apply RBAC + deployInfoRequest := &securityBean.ImageScanRequest{ + ImageScanFilter: bean.ImageScanFilter{ + EnvironmentIds: request.EnvironmentIds, + ClusterIds: request.ClusterIds, + }, + } + + deployInfoList, err := impl.imageScanService.FetchAllDeployInfo(deployInfoRequest) + if err != nil { + impl.logger.Errorw("service err, VulnerabilityListing", "err", err) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, nil, newEmptyVulnerabilityListingResponse(request), http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + + filteredDeployInfoList, err := impl.imageScanService.FilterDeployInfoByScannedArtifactsDeployedInEnv(deployInfoList) + if err != nil { + impl.logger.Errorw("service err, FilterDeployInfoByScannedArtifactsDeployedInEnv", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + // Apply RBAC + _, rbacSpan := otel.Tracer("imageScanRestHandler").Start(ctx, "RBACProcessing") + token := r.Header.Get("token") + isSuperAdmin := false + if ok := impl.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*"); ok { + isSuperAdmin = true + } + var ids []int + if isSuperAdmin { + ids = sliceUtil.NewSliceFromFuncExec(filteredDeployInfoList, func(item *security2.ImageScanDeployInfo) int { + return item.Id + }) + } else { + ids, err = impl.getAuthorisedImageScanDeployInfoIds(token, filteredDeployInfoList) + if err != nil { + impl.logger.Errorw("error in getting authorised image scan deploy info ids", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + } + rbacSpan.End() + + if len(ids) == 0 { + common.WriteJsonResp(w, nil, newEmptyVulnerabilityListingResponse(request), http.StatusOK) + return + } + + // Fetch vulnerability listing + listing, err := impl.imageScanService.FetchVulnerabilityListing(ctx, request, ids) + if err != nil { + impl.logger.Errorw("service err, VulnerabilityListing", "err", err) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, nil, newEmptyVulnerabilityListingResponse(request), http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + common.WriteJsonResp(w, err, listing, http.StatusOK) +} + +// newEmptyVulnerabilityListingResponse echoes the requested page with zero results so handlers can respond 200 OK when no data exists. +func newEmptyVulnerabilityListingResponse(request *securityBean.VulnerabilityListingRequest) *securityBean.VulnerabilityListingResponse { + return &securityBean.VulnerabilityListingResponse{ + Offset: request.Offset, + Size: request.Size, + Vulnerabilities: []*securityBean.VulnerabilityDetail{}, + } +} diff --git a/api/restHandler/OverviewRestHandler.go b/api/restHandler/OverviewRestHandler.go new file mode 100644 index 0000000000..8830392036 --- /dev/null +++ b/api/restHandler/OverviewRestHandler.go @@ -0,0 +1,643 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package restHandler + +import ( + "fmt" + "net/http" + + "github.com/devtron-labs/devtron/api/restHandler/common" + "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" + "github.com/devtron-labs/devtron/pkg/auth/user" + "github.com/devtron-labs/devtron/pkg/overview" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + "github.com/devtron-labs/devtron/pkg/overview/util" + "github.com/gorilla/schema" + "go.uber.org/zap" + "gopkg.in/go-playground/validator.v9" +) + +type OverviewRestHandler interface { + GetAppsOverview(w http.ResponseWriter, r *http.Request) + GetWorkflowOverview(w http.ResponseWriter, r *http.Request) + GetBuildDeploymentActivity(w http.ResponseWriter, r *http.Request) + GetBuildDeploymentActivityDetailed(w http.ResponseWriter, r *http.Request) + GetDoraMetrics(w http.ResponseWriter, r *http.Request) + GetInsights(w http.ResponseWriter, r *http.Request) + + // Cluster Management Overview + GetClusterOverview(w http.ResponseWriter, r *http.Request) + DeleteClusterOverviewCache(w http.ResponseWriter, r *http.Request) + RefreshClusterOverviewCache(w http.ResponseWriter, r *http.Request) + + // Cluster Overview Detailed Drill-down API (unified endpoint) + GetClusterOverviewDetailedNodeInfo(w http.ResponseWriter, r *http.Request) + + // Security Overview APIs + GetSecurityOverview(w http.ResponseWriter, r *http.Request) + GetSeverityInsights(w http.ResponseWriter, r *http.Request) + GetDeploymentSecurityStatus(w http.ResponseWriter, r *http.Request) + GetVulnerabilityTrend(w http.ResponseWriter, r *http.Request) + GetBlockedDeploymentsTrend(w http.ResponseWriter, r *http.Request) +} + +type OverviewRestHandlerImpl struct { + logger *zap.SugaredLogger + overviewService overview.OverviewService + userService user.UserService + validator *validator.Validate + enforcer casbin.Enforcer +} + +func NewOverviewRestHandlerImpl( + logger *zap.SugaredLogger, + overviewService overview.OverviewService, + userService user.UserService, + validator *validator.Validate, + enforcer casbin.Enforcer, +) *OverviewRestHandlerImpl { + return &OverviewRestHandlerImpl{ + logger: logger, + overviewService: overviewService, + userService: userService, + validator: validator, + enforcer: enforcer, + } +} + +// validateTimeParameters validates that either timeWindow is provided or both from and to are provided +// Returns error if validation fails +func validateTimeParameters(timeWindow, from, to string) error { + hasTimeWindow := len(timeWindow) > 0 + hasFromTo := len(from) > 0 && len(to) > 0 + + if !hasTimeWindow && !hasFromTo { + return fmt.Errorf("either timeWindow or both from/to parameters must be provided") + } + + return nil +} + +func (handler *OverviewRestHandlerImpl) GetAppsOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + result, err := handler.overviewService.GetAppsOverview(r.Context()) + if err != nil { + handler.logger.Errorw("error in getting apps overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetWorkflowOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, 
"Unauthorized User", http.StatusUnauthorized) + return + } + + result, err := handler.overviewService.GetWorkflowOverview(r.Context()) + if err != nil { + handler.logger.Errorw("error in getting workflow overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetBuildDeploymentActivity(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Parse from and to parameters + request, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + buildDeploymentRequest := &bean.BuildDeploymentActivityRequest{ + From: request.From, + To: request.To, + } + + if err := handler.validator.Struct(buildDeploymentRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetBuildDeploymentActivity(r.Context(), buildDeploymentRequest) + if err != nil { + handler.logger.Errorw("error in getting build deployment activity", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetBuildDeploymentActivityDetailed(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + activityKind := r.URL.Query().Get("activityKind") + if activityKind == "" { + handler.logger.Errorw("activityKind query parameter is required") + common.WriteJsonResp(w, fmt.Errorf("activityKind query parameter is required"), nil, http.StatusBadRequest) + return + } + + request, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + aggregationType := constants.GetAggregationType(constants.TimePeriod(timeWindow)) + + buildDeploymentDetailedRequest := &bean.BuildDeploymentActivityDetailedRequest{ + ActivityKind: bean.ActivityKind(activityKind), + AggregationType: aggregationType, + From: request.From, + To: request.To, + } + + if err := 
handler.validator.Struct(buildDeploymentDetailedRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetBuildDeploymentActivityDetailed(r.Context(), buildDeploymentDetailedRequest) + if err != nil { + handler.logger.Errorw("error in getting build deployment activity detailed", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetDoraMetrics(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Get both current and previous time ranges + currentTimeWindow, prevTimeWindow, err := util.GetCurrentAndPreviousTimeRangeBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing time periods", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + doraRequest := &bean.DoraMetricsRequest{ + TimeRangeRequest: currentTimeWindow, + PrevFrom: prevTimeWindow.From, + PrevTo: prevTimeWindow.To, + } + + if err := handler.validator.Struct(doraRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetDoraMetrics(r.Context(), doraRequest) + if err != nil { + handler.logger.Errorw("error in getting DORA metrics", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetInsights(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + request, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Extract new query parameters + pipelineTypeStr := r.URL.Query().Get("pipelineType") + if pipelineTypeStr == "" { + handler.logger.Errorw("pipelineType parameter is required") + common.WriteJsonResp(w, fmt.Errorf("pipelineType parameter is required"), nil, http.StatusBadRequest) + return + } + + // Validate pipelineType + var pipelineType bean.PipelineType + switch pipelineTypeStr { + case 
string(bean.BuildPipelines): + pipelineType = bean.BuildPipelines + case string(bean.DeploymentPipelines): + pipelineType = bean.DeploymentPipelines + default: + handler.logger.Errorw("invalid pipelineType parameter", "pipelineType", pipelineTypeStr) + common.WriteJsonResp(w, fmt.Errorf("invalid pipelineType parameter. Must be 'buildPipelines' or 'deploymentPipelines'"), nil, http.StatusBadRequest) + return + } + + sortOrderStr := r.URL.Query().Get("sortOrder") + if sortOrderStr == "" { + sortOrderStr = string(bean.DESC) // Default to DESC + } + + // Validate sortOrder + var sortOrder bean.SortOrder + switch sortOrderStr { + case string(bean.ASC): + sortOrder = bean.ASC + case string(bean.DESC): + sortOrder = bean.DESC + default: + handler.logger.Errorw("invalid sortOrder parameter", "sortOrder", sortOrderStr) + common.WriteJsonResp(w, fmt.Errorf("invalid sortOrder parameter. Must be 'ASC' or 'DESC'"), nil, http.StatusBadRequest) + return + } + + limit, err := common.ExtractIntQueryParam(w, r, "limit", 10) + if err != nil { + handler.logger.Errorw("error in parsing limit parameter", "err", err) + return + } + + offset, err := common.ExtractIntQueryParam(w, r, "offset", 0) + if err != nil { + handler.logger.Errorw("error in parsing offset parameter", "err", err) + return + } + + insightsRequest := &bean.InsightsRequest{ + TimeRangeRequest: request, + PipelineType: pipelineType, + SortOrder: sortOrder, + Limit: limit, + Offset: offset, + } + + if err := handler.validator.Struct(insightsRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetInsights(r.Context(), insightsRequest) + if err != nil { + handler.logger.Errorw("error in getting insights", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +// GetClusterOverview handles cluster management overview requests +func (handler *OverviewRestHandlerImpl) GetClusterOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + result, err := handler.overviewService.GetClusterOverview(r.Context()) + if err != nil { + handler.logger.Errorw("error in getting cluster overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +// DeleteClusterOverviewCache handles cluster overview cache deletion requests +func (handler *OverviewRestHandlerImpl) DeleteClusterOverviewCache(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + err = handler.overviewService.DeleteClusterOverviewCache(r.Context()) + if err != nil { + handler.logger.Errorw("error in deleting cluster overview cache", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + handler.logger.Infow("cluster overview cache deleted successfully", "userId", userId) + common.WriteJsonResp(w, nil, map[string]string{"message": "Cluster overview cache deleted successfully"}, http.StatusOK) +} + +// RefreshClusterOverviewCache handles cluster overview cache refresh requests +func 
(handler *OverviewRestHandlerImpl) RefreshClusterOverviewCache(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + err = handler.overviewService.RefreshClusterOverviewCache(r.Context()) + if err != nil { + handler.logger.Errorw("error in refreshing cluster overview cache", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + handler.logger.Infow("cluster overview cache refreshed successfully", "userId", userId) + common.WriteJsonResp(w, nil, nil, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetClusterOverviewDetailedNodeInfo(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + var request bean.ClusterOverviewDetailRequest + decoder := schema.NewDecoder() + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Validate the request + if err := handler.validator.Struct(request); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetClusterOverviewDetailedNodeInfo(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting cluster overview detail", "err", err, "groupBy", request.GroupBy) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +// ============================================================================ +// Security Overview APIs +// ============================================================================ + +func (handler *OverviewRestHandlerImpl) GetSecurityOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + decoder := schema.NewDecoder() + decoder.IgnoreUnknownKeys(true) + var request bean.SecurityOverviewRequest + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetSecurityOverview(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting security overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetSeverityInsights(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + decoder := schema.NewDecoder() + decoder.IgnoreUnknownKeys(true) + var request bean.SeverityInsightsRequest + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding 
request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Validate the request + if err := handler.validator.Struct(request); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetSeverityInsights(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting severity insights", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetDeploymentSecurityStatus(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + decoder := schema.NewDecoder() + decoder.IgnoreUnknownKeys(true) + var request bean.DeploymentSecurityStatusRequest + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetDeploymentSecurityStatus(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting deployment security status", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetVulnerabilityTrend(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + envType := r.URL.Query().Get("envType") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Validate envType + if envType != string(bean.EnvTypeProd) && envType != string(bean.EnvTypeNonProd) && envType != string(bean.EnvTypeAll) { + handler.logger.Errorw("invalid envType", "envType", envType) + common.WriteJsonResp(w, fmt.Errorf("envType must be 'prod', 'non-prod' or 'all'"), nil, http.StatusBadRequest) + return + } + + // Get both current and previous time ranges + currentTimeWindow, _, err := util.GetCurrentAndPreviousTimeRangeBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing time periods", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Determine aggregation type based on time range + timePeriod := util.GetTimePeriodFromTimeRange(currentTimeWindow.From, currentTimeWindow.To) + aggregationType := constants.GetAggregationType(timePeriod) + + result, err := handler.overviewService.GetVulnerabilityTrend(r.Context(), currentTimeWindow, bean.EnvType(envType), aggregationType) + if err != nil { + handler.logger.Errorw("error in getting vulnerability trend", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, 
http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetBlockedDeploymentsTrend(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Get current time range + currentTimeWindow, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing time period", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Determine aggregation type based on time range + timePeriod := util.GetTimePeriodFromTimeRange(currentTimeWindow.From, currentTimeWindow.To) + aggregationType := constants.GetAggregationType(timePeriod) + + result, err := handler.overviewService.GetBlockedDeploymentsTrend(r.Context(), currentTimeWindow, aggregationType) + if err != nil { + handler.logger.Errorw("error in getting blocked deployments trend", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} diff --git a/api/router/ImageScanRouter.go b/api/router/ImageScanRouter.go index 7feabb13a7..b67133eb65 100644 --- a/api/router/ImageScanRouter.go +++ b/api/router/ImageScanRouter.go @@ -34,6 +34,8 @@ func NewImageScanRouterImpl(imageScanRestHandler restHandler.ImageScanRestHandle func (impl ImageScanRouterImpl) InitImageScanRouter(configRouter *mux.Router) { configRouter.Path("/list").HandlerFunc(impl.imageScanRestHandler.ScanExecutionList).Methods("POST") + configRouter.Path("/summary").HandlerFunc(impl.imageScanRestHandler.VulnerabilitySummary).Methods("POST") + configRouter.Path("/vulnerabilities").HandlerFunc(impl.imageScanRestHandler.VulnerabilityListing).Methods("POST") //image=image:abc&envId=3&appId=100&artifactId=100&executionId=100 configRouter.Path("/executionDetail").HandlerFunc(impl.imageScanRestHandler.FetchExecutionDetail).Methods("GET") diff --git a/api/router/OverviewRouter.go b/api/router/OverviewRouter.go new file mode 100644 index 0000000000..895ed9a002 --- /dev/null +++ b/api/router/OverviewRouter.go @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package router + +import ( + "github.com/devtron-labs/devtron/api/restHandler" + "github.com/gorilla/mux" +) + +type OverviewRouter interface { + InitOverviewRouter(overviewRouter *mux.Router) +} + +type OverviewRouterImpl struct { + overviewRestHandler restHandler.OverviewRestHandler +} + +func NewOverviewRouterImpl(overviewRestHandler restHandler.OverviewRestHandler) *OverviewRouterImpl { + return &OverviewRouterImpl{ + overviewRestHandler: overviewRestHandler, + } +} + +func (router OverviewRouterImpl) InitOverviewRouter(overviewRouter *mux.Router) { + // New Apps Overview API + overviewRouter.Path("/apps-overview"). + HandlerFunc(router.overviewRestHandler.GetAppsOverview). + Methods("GET") + + // New Workflow Overview API + overviewRouter.Path("/workflow-overview"). + HandlerFunc(router.overviewRestHandler.GetWorkflowOverview). 
+ Methods("GET") + + // Build and Deployment Activity + overviewRouter.Path("/build-deployment-activity"). + HandlerFunc(router.overviewRestHandler.GetBuildDeploymentActivity). + Methods("GET") + + // Build and Deployment Activity Detailed + overviewRouter.Path("/build-deployment-activity/detailed"). + HandlerFunc(router.overviewRestHandler.GetBuildDeploymentActivityDetailed). + Methods("GET") + + // DORA Metrics + overviewRouter.Path("/dora-metrics"). + HandlerFunc(router.overviewRestHandler.GetDoraMetrics). + Methods("GET") + + // Pipeline Insights + overviewRouter.Path("/pipeline-insights"). + HandlerFunc(router.overviewRestHandler.GetInsights). + Methods("GET") + + // Infra Overview Subrouter + infraOverviewRouter := overviewRouter.PathPrefix("/infra").Subrouter() + + // Cluster Management Overview + infraOverviewRouter.Path(""). + HandlerFunc(router.overviewRestHandler.GetClusterOverview). + Methods("GET") + + // Delete Cluster Overview Cache + infraOverviewRouter.Path("/cache"). + HandlerFunc(router.overviewRestHandler.DeleteClusterOverviewCache). + Methods("DELETE") + + // Refresh Cluster Overview Cache + infraOverviewRouter.Path("/refresh"). + HandlerFunc(router.overviewRestHandler.RefreshClusterOverviewCache). + Methods("GET") + + // Cluster Overview Detailed Node Info + infraOverviewRouter.Path("/node-list"). + HandlerFunc(router.overviewRestHandler.GetClusterOverviewDetailedNodeInfo). + Methods("GET") + + // Security Overview Subrouter + securityOverviewRouter := overviewRouter.PathPrefix("/security").Subrouter() + + // Security Overview - "At a Glance" metrics (organization-wide) + securityOverviewRouter.Path("/security-glance"). + HandlerFunc(router.overviewRestHandler.GetSecurityOverview). + Methods("GET") + + // Severity Insights - With prod/non-prod filtering + securityOverviewRouter.Path("/severity-insights"). + HandlerFunc(router.overviewRestHandler.GetSeverityInsights). + Methods("GET") + + // Deployment Security Status + securityOverviewRouter.Path("/deployment-security-status"). + HandlerFunc(router.overviewRestHandler.GetDeploymentSecurityStatus). + Methods("GET") + + // Vulnerability Trend - Time-series with prod/non-prod filtering + securityOverviewRouter.Path("/vulnerability-trend"). + HandlerFunc(router.overviewRestHandler.GetVulnerabilityTrend). + Methods("GET") + + // Blocked Deployments Trend - Organization-wide + securityOverviewRouter.Path("/blocked-deployments-trend"). + HandlerFunc(router.overviewRestHandler.GetBlockedDeploymentsTrend). 
+ Methods("GET") + +} diff --git a/api/router/router.go b/api/router/router.go index 0ea41a35a8..a1a7a05662 100644 --- a/api/router/router.go +++ b/api/router/router.go @@ -18,6 +18,8 @@ package router import ( "encoding/json" + "net/http" + "github.com/devtron-labs/devtron/api/apiToken" "github.com/devtron-labs/devtron/api/appStore" "github.com/devtron-labs/devtron/api/appStore/chartGroup" @@ -55,7 +57,6 @@ import ( "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" - "net/http" ) type MuxRouter struct { @@ -124,6 +125,7 @@ type MuxRouter struct { devtronResourceRouter devtronResource.DevtronResourceRouter scanningResultRouter resourceScan.ScanningResultRouter userResourceRouter userResource.Router + overviewRouter OverviewRouter } func NewMuxRouter(logger *zap.SugaredLogger, @@ -159,6 +161,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, fluxApplicationRouter fluxApplication2.FluxApplicationRouter, scanningResultRouter resourceScan.ScanningResultRouter, userResourceRouter userResource.Router, + overviewRouter OverviewRouter, ) *MuxRouter { r := &MuxRouter{ Router: mux.NewRouter(), @@ -226,6 +229,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, fluxApplicationRouter: fluxApplicationRouter, scanningResultRouter: scanningResultRouter, userResourceRouter: userResourceRouter, + overviewRouter: overviewRouter, } return r } @@ -444,4 +448,6 @@ func (r MuxRouter) Init() { fluxApplicationRouter := r.Router.PathPrefix("/orchestrator/flux-application").Subrouter() r.fluxApplicationRouter.InitFluxApplicationRouter(fluxApplicationRouter) + overviewRouter := r.Router.PathPrefix("/orchestrator/overview").Subrouter() + r.overviewRouter.InitOverviewRouter(overviewRouter) } diff --git a/client/lens/LensClient.go b/client/lens/LensClient.go index 784c14598d..4084f710e4 100644 --- a/client/lens/LensClient.go +++ b/client/lens/LensClient.go @@ -19,14 +19,16 @@ package lens import ( "bytes" "encoding/json" - "github.com/caarlos0/env" - "go.uber.org/zap" + "fmt" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" + + "github.com/caarlos0/env" + "go.uber.org/zap" ) type LensConfig struct { @@ -41,6 +43,7 @@ func (code StatusCode) IsSuccess() bool { type LensClient interface { GetAppMetrics(metricRequest *MetricRequest) (resBody []byte, resCode *StatusCode, err error) + GetBulkAppMetrics(bulkRequest *BulkMetricRequest) (*LensResponse, *StatusCode, error) } type LensClientImpl struct { httpClient *http.Client @@ -66,7 +69,21 @@ func NewLensClientImpl(config *LensConfig, logger *zap.SugaredLogger) (*LensClie type ClientRequest struct { Method string Path string - RequestBody *MetricRequest + RequestBody interface{} +} + +type LensResponse struct { + Code int `json:"code,omitempty"` + Status string `json:"status,omitempty"` + Result json.RawMessage `json:"result,omitempty"` + Errors []*LensApiError `json:"errors,omitempty"` +} +type LensApiError struct { + HttpStatusCode int `json:"-"` + Code string `json:"code,omitempty"` + InternalMessage string `json:"internalMessage,omitempty"` + UserMessage string `json:"userMessage,omitempty"` + UserDetailMessage string `json:"userDetailMessage,omitempty"` } func (session *LensClientImpl) doRequest(clientRequest *ClientRequest) (resBody []byte, resCode *StatusCode, err error) { @@ -109,6 +126,50 @@ type MetricRequest struct { To string `json:"to"` } +type AppEnvPair struct { + AppId int `json:"appId"` + EnvId int `json:"envId"` +} + +type BulkMetricRequest struct { + AppEnvPairs []AppEnvPair `json:"appEnvPairs"` + From 
*time.Time `json:"from"` + To *time.Time `json:"to"` +} + +type Metrics struct { + AverageCycleTime float64 `json:"average_cycle_time"` + AverageLeadTime float64 `json:"average_lead_time"` + ChangeFailureRate float64 `json:"change_failure_rate"` + AverageRecoveryTime float64 `json:"average_recovery_time"` + AverageDeploymentSize float32 `json:"average_deployment_size"` + AverageLineAdded float32 `json:"average_line_added"` + AverageLineDeleted float32 `json:"average_line_deleted"` + LastFailedTime string `json:"last_failed_time"` + RecoveryTimeLastFailed float64 `json:"recovery_time_last_failed"` +} + +type AppEnvMetrics struct { + AppId int `json:"appId"` + EnvId int `json:"envId"` + Metrics *Metrics `json:"metrics"` + Error string `json:"error,omitempty"` +} + +type BulkMetricsResponse struct { + Results []AppEnvMetrics `json:"results"` +} + +// DoraMetrics represents the new response structure from Lens API +type DoraMetrics struct { + AppId int `json:"app_id"` + EnvId int `json:"env_id"` + DeploymentFrequency float64 `json:"deployment_frequency"` // Deployments per day + ChangeFailureRate float64 `json:"change_failure_rate"` // Percentage + MeanLeadTimeForChanges float64 `json:"mean_lead_time_for_changes"` // Minutes + MeanTimeToRecovery float64 `json:"mean_time_to_recovery"` // Minutes +} + func (session *LensClientImpl) GetAppMetrics(metricRequest *MetricRequest) (resBody []byte, resCode *StatusCode, err error) { params := url.Values{} params.Add("app_id", strconv.Itoa(metricRequest.AppId)) @@ -128,3 +189,30 @@ func (session *LensClientImpl) GetAppMetrics(metricRequest *MetricRequest) (resB resBody, resCode, err = session.doRequest(req) return resBody, resCode, err } + +func (session *LensClientImpl) GetBulkAppMetrics(bulkRequest *BulkMetricRequest) (*LensResponse, *StatusCode, error) { + u, err := url.Parse("deployment-metrics/bulk") + if err != nil { + return nil, nil, err + } + req := &ClientRequest{ + Method: "GET", + Path: u.String(), + RequestBody: bulkRequest, + } + session.logger.Infow("lens bulk req", "req", req) + resBody, resCode, err := session.doRequest(req) + if err != nil { + return nil, resCode, err + } + if resCode.IsSuccess() { + apiRes := &LensResponse{} + err = json.Unmarshal(resBody, apiRes) + if err != nil { + return nil, resCode, err + } + return apiRes, resCode, nil + } + session.logger.Errorw("api err in git sensor response", "res", string(resBody)) + return nil, resCode, fmt.Errorf("res not success, Statuscode: %v", resCode) +} diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index 6ab2fc2c02..f9a8452e36 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. 
-//go:generate go run github.com/google/wire/cmd/wire +//go:generate go run -mod=mod github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject diff --git a/env_gen.json b/env_gen.json index 99df280700..2694919f4a 100644 --- a/env_gen.json +++ b/env_gen.json @@ -1 +1 @@ -[{"Category":"CD","Fields":[{"Env":"ARGO_APP_MANUAL_SYNC_TIME","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"CD_FLUX_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time to check the pipeline status for flux cd pipeline","Example":"","Deprecated":"false"},{"Env":"CD_HELM_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time to check the pipeline status ","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time for CD pipeline status","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_TIMEOUT_DURATION","EnvType":"string","EnvValue":"20","EnvDescription":"Timeout for CD pipeline to get healthy","Example":"","Deprecated":"false"},{"Env":"DEPLOY_STATUS_CRON_GET_PIPELINE_DEPLOYED_WITHIN_HOURS","EnvType":"int","EnvValue":"12","EnvDescription":"This flag is used to fetch the deployment status of the application. It retrieves the status of deployments that occurred between 12 hours and 10 minutes prior to the current time. It fetches non-terminal statuses.","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"1","EnvDescription":"Context timeout for gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"6","EnvDescription":"Context timeout for no gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CD_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable migration of external argocd application to devtron pipeline","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_FLUX_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable flux application services","Example":"","Deprecated":"false"},{"Env":"FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking flux app status periodically and update in db, value is in seconds., default is 120, if wfr is updated within configured time i.e. FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking helm app status periodically and update in db, value is in seconds., default is 120, if wfr is updated within configured time i.e. HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"IS_INTERNAL_USE","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled then cd pipeline and helm apps will not need the deployment app type mandatorily. 
Couple this flag with HIDE_GITOPS_OR_HELM_OPTION (in Dashborad) and if gitops is configured and allowed for the env, pipeline/ helm app will gitops else no-gitops.","Example":"","Deprecated":"false"},{"Env":"MIGRATE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"migrate deployment config data from charts table to deployment_config table","Example":"","Deprecated":"false"},{"Env":"PIPELINE_DEGRADED_TIME","EnvType":"string","EnvValue":"10","EnvDescription":"Time to mark a pipeline degraded if not healthy in defined time","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_DEVTRON_APP","EnvType":"int","EnvValue":"1","EnvDescription":"Count for devtron application rivision history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_EXTERNAL_HELM_APP","EnvType":"int","EnvValue":"0","EnvDescription":"Count for external helm application rivision history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_HELM_APP","EnvType":"int","EnvValue":"1","EnvDescription":"To set the history limit for the helm app being deployed through devtron","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_LINKED_HELM_APP","EnvType":"int","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOULD_CHECK_NAMESPACE_ON_CLONE","EnvType":"bool","EnvValue":"false","EnvDescription":"should we check if namespace exists or not while cloning app","Example":"","Deprecated":"false"},{"Env":"USE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"use deployment config data from deployment_config table","Example":"","Deprecated":"true"},{"Env":"VALIDATE_EXT_APP_CHART_TYPE","EnvType":"bool","EnvValue":"false","EnvDescription":"validate external flux app chart","Example":"","Deprecated":"false"}]},{"Category":"CI_BUILDX","Fields":[{"Env":"ASYNC_BUILDX_CACHE_EXPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async container image cache export","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_MODE_MIN","EnvType":"bool","EnvValue":"false","EnvDescription":"To set build cache mode to minimum in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_INTERRUPTION_MAX_RETRY","EnvType":"int","EnvValue":"3","EnvDescription":"Maximum number of retries for buildx builder interruption","Example":"","Deprecated":"false"}]},{"Category":"CI_RUNNER","Fields":[{"Env":"AZURE_ACCOUNT_KEY","EnvType":"string","EnvValue":"","EnvDescription":"If blob storage is being used of azure then pass the secret key to access the bucket","Example":"","Deprecated":"false"},{"Env":"AZURE_ACCOUNT_NAME","EnvType":"string","EnvValue":"","EnvDescription":"Account name for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_CACHE","EnvType":"string","EnvValue":"","EnvDescription":"Cache bucket name for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_LOG","EnvType":"string","EnvValue":"","EnvDescription":"Log bucket for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_CONNECTION_INSECURE","EnvType":"bool","EnvValue":"true","EnvDescription":"Azure gateway connection allows insecure if true","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_URL","EnvType":"string","EnvValue":"http://devtron-minio.devtroncd:9000","EnvDescription":"Sent to CI runner for 
blob","Example":"","Deprecated":"false"},{"Env":"BASE_LOG_LOCATION_PATH","EnvType":"string","EnvValue":"/home/devtron/","EnvDescription":"Used to store, download logs of ci workflow, artifact","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_GCP_CREDENTIALS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"GCP cred json for GCS blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_PROVIDER","EnvType":"","EnvValue":"S3","EnvDescription":"Blob storage provider name(AWS/GCP/Azure)","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ACCESS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"S3 access key for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_BUCKET_VERSIONED","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable buctet versioning for blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT","EnvType":"string","EnvValue":"","EnvDescription":"S3 endpoint URL for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT_INSECURE","EnvType":"bool","EnvValue":"false","EnvDescription":"To use insecure s3 endpoint","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_SECRET_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Secret key for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/devtron/buildx","EnvDescription":"Path for the buildx cache","Example":"","Deprecated":"false"},{"Env":"BUILDX_K8S_DRIVER_OPTIONS","EnvType":"string","EnvValue":"","EnvDescription":"To enable the k8s driver and pass args for k8s driver in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_PROVENANCE_MODE","EnvType":"string","EnvValue":"","EnvDescription":"provinance is set to true by default by docker. this will add some build related data in generated build manifest.it also adds some unknown:unknown key:value pair which may not be compatible by some container registries. 
with buildx k8s driver , provinenance=true is causing issue when push manifest to quay registry, so setting it to false","Example":"","Deprecated":"false"},{"Env":"BUILD_LOG_TTL_VALUE_IN_SECS","EnvType":"int","EnvValue":"3600","EnvDescription":"This is the time that the pods of ci/pre-cd/post-cd live after completion state.","Example":"","Deprecated":"false"},{"Env":"CACHE_LIMIT","EnvType":"int64","EnvValue":"5000000000","EnvDescription":"Cache limit.","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for Pre/Post cd ","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"Toleration key for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"Toleration value for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Rquest Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Rquest Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for Pre/Post CD(AWF,System)","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"cd-runner","EnvDescription":"Service account to be used in Pre/Post CD pod","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for CI","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CI","Example":"","Deprecated":"false"},{"Env":"CI_IGNORE_DOCKER_CACHE","EnvType":"bool","EnvValue":"","EnvDescription":"Ignoring docker cache ","Example":"","Deprecated":"false"},{"Env":"CI_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for build logs","Example":"","Deprecated":"false"},{"Env":"CI_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Toleration key for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"","EnvDescription":"Toleration value for CI","Example":"","Deprecated":"false"},{"Env":"CI_RUNNER_DOCKER_MTU_VALUE","EnvType":"int","EnvValue":"-1","EnvDescription":"this is to control the bytes of inofrmation passed in a network packet in ci-runner. 
default is -1 (defaults to the underlying node mtu value)","Example":"","Deprecated":"false"},{"Env":"CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"this is to control the no of linked pipelines should be hanled in one go when a ci-success event of an parent ci is received","Example":"","Deprecated":"false"},{"Env":"CI_VOLUME_MOUNTS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"additional volume mount data for CI and JOB","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for CI(AWF,System)","Example":"","Deprecated":"false"},{"Env":"DEFAULT_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"arsenal-v1/ci-artifacts","EnvDescription":"Key location for artifacts being created","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_BUCKET","EnvType":"string","EnvValue":"devtron-pro-ci-logs","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"arsenal-v1","EnvDescription":"Bucket prefix for build logs","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET","EnvType":"string","EnvValue":"ci-caching","EnvDescription":"Bucket name for build cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"Build Cache bucket region","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"","EnvDescription":"Bucket prefix for build cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_LOGS_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"Namespace for devtron stack","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_TIMEOUT","EnvType":"int64","EnvValue":"3600","EnvDescription":"Timeout for Pre/Post-Cd to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CI_IMAGE","EnvType":"string","EnvValue":"686244538589.dkr.ecr.us-east-2.amazonaws.com/cirunner:47","EnvDescription":"To pass the ci-runner image","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TARGET_PLATFORM","EnvType":"string","EnvValue":"","EnvDescription":"Default architecture for buildx","Example":"","Deprecated":"false"},{"Env":"DOCKER_BUILD_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/docker","EnvDescription":"Path to store cache of docker build (/var/lib/docker-\u003e for legacy docker build, /var/lib/devtron-\u003e for buildx)","Example":"","Deprecated":"false"},{"Env":"ENABLE_BUILD_CONTEXT","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable build context in Devtron.","Example":"","Deprecated":"false"},{"Env":"ENABLE_WORKFLOW_EXECUTION_STAGE","EnvType":"bool","EnvValue":"true","EnvDescription":"if enabled then we will display build stages separately for CI/Job/Pre-Post CD","Example":"true","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_CM_NAME","EnvType":"string","EnvValue":"blob-storage-cm","EnvDescription":"name of the config map(contains bucket name, etc.) 
in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_SECRET_NAME","EnvType":"string","EnvValue":"blob-storage-secret","EnvDescription":"name of the secret(contains password, accessId,passKeys, etc.) in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"This is an array of strings used when submitting a workflow for pre or post-CD execution. If the ","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_API_SECRET","EnvType":"string","EnvValue":"devtroncd-secret","EnvDescription":"External CI API secret.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_PAYLOAD","EnvType":"string","EnvValue":"{\"ciProjectDetails\":[{\"gitRepository\":\"https://github.com/vikram1601/getting-started-nodejs.git\",\"checkoutPath\":\"./abc\",\"commitHash\":\"239077135f8cdeeccb7857e2851348f558cb53d3\",\"commitTime\":\"2022-10-30T20:00:00\",\"branch\":\"master\",\"message\":\"Update README.md\",\"author\":\"User Name \"}],\"dockerImage\":\"445808685819.dkr.ecr.us-east-2.amazonaws.com/orch:23907713-2\"}","EnvDescription":"External CI payload with project details.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_WEB_HOOK_URL","EnvType":"string","EnvValue":"","EnvDescription":"default is {{HOST_URL}}/orchestrator/webhook/ext-ci. It is used for external ci.","Example":"","Deprecated":"false"},{"Env":"IGNORE_CM_CS_IN_CI_JOB","EnvType":"bool","EnvValue":"false","EnvDescription":"Ignore CM/CS in CI-pipeline as Job","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_COUNT","EnvType":"int","EnvValue":"0","EnvDescription":"push artifact(image) in ci retry count ","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_INTERVAL","EnvType":"int","EnvValue":"5","EnvDescription":"image retry interval takes value in seconds","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCANNER_ENDPOINT","EnvType":"string","EnvValue":"http://image-scanner-new-demo-devtroncd-service.devtroncd:80","EnvDescription":"Image-scanner micro-service URL","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_MAX_RETRIES","EnvType":"int","EnvValue":"3","EnvDescription":"Max retry count for image-scanning","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay for the image-scanning to start","Example":"","Deprecated":"false"},{"Env":"IN_APP_LOGGING_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"Used in case of argo workflow is enabled. 
If enabled logs push will be managed by us, else will be managed by argo workflow.","Example":"","Deprecated":"false"},{"Env":"MAX_CD_WORKFLOW_RUNNER_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum number of times pre/post-cd-workflow will create a pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MAX_CI_WORKFLOW_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum number of times CI-workflow will create a pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MODE","EnvType":"string","EnvValue":"DEV","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_SERVER_HOST","EnvType":"string","EnvValue":"localhost:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_HOST","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd/webhook/msg/nats","EnvDescription":"Orchestrator micro-service URL ","Example":"","Deprecated":"false"},{"Env":"ORCH_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"Orchestrator token","Example":"","Deprecated":"false"},{"Env":"PRE_CI_CACHE_PATH","EnvType":"string","EnvValue":"/devtroncd-cache","EnvDescription":"Cache path for Pre CI tasks","Example":"","Deprecated":"false"},{"Env":"SHOW_DOCKER_BUILD_ARGS","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable showing the args passed for CI in build logs","Example":"","Deprecated":"false"},{"Env":"SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL","EnvType":"bool","EnvValue":"false","EnvDescription":"To skip cache Push/Pull for ci job","Example":"","Deprecated":"false"},{"Env":"SKIP_CREATING_ECR_REPO","EnvType":"bool","EnvValue":"false","EnvDescription":"By enabling this, ECR repo won't get created if it's not available on ECR from build configuration","Example":"","Deprecated":"false"},{"Env":"TERMINATION_GRACE_PERIOD_SECS","EnvType":"int","EnvValue":"180","EnvDescription":"this is the time given to workflow pods to shut down (graceful termination time)","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_QUERY_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 query for listing artifacts","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CD_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post cd","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CI_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post ci","Example":"","Deprecated":"false"},{"Env":"USE_BUILDX","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable buildx feature globally","Example":"","Deprecated":"false"},{"Env":"USE_DOCKER_API_TO_GET_DIGEST","EnvType":"bool","EnvValue":"false","EnvDescription":"when the user does not pass the digest, this flag controls whether the image digest is found using the docker API or not. if set to true we get the digest from a docker API call, else we use the docker pull command. [logic in ci-runner]","Example":"","Deprecated":"false"},{"Env":"USE_EXTERNAL_NODE","EnvType":"bool","EnvValue":"false","EnvDescription":"It is used in case of Pre/ Post Cd with run in application mode. 
If enabled the node labels are read from EXTERNAL_CD_NODE_LABEL_SELECTOR, else from CD_NODE_LABEL_SELECTOR. MODE: if the value is DEV, it will read the local kube config file, else from the cluster location.","Example":"","Deprecated":"false"},{"Env":"USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD","EnvType":"bool","EnvValue":"false","EnvDescription":"To use the same tag in container image as that of git tag","Example":"","Deprecated":"false"},{"Env":"WF_CONTROLLER_INSTANCE_ID","EnvType":"string","EnvValue":"devtron-runner","EnvDescription":"Workflow controller instance ID.","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_CACHE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"flag is used to configure how Docker caches are handled during a CI/CD ","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"ci-runner","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"-","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ADDITIONAL_NODE_GROUP_LABELS","EnvType":"","EnvValue":"","EnvDescription":"Add comma separated list of additional node group labels to default labels","Example":"karpenter.sh/nodepool,cloud.google.com/gke-nodepool","Deprecated":"false"},{"Env":"APP_SYNC_IMAGE","EnvType":"string","EnvValue":"quay.io/devtron/chart-sync:1227622d-132-3775","EnvDescription":"For the app sync image, this image will be used in app-manual sync job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_JOB_RESOURCES_OBJ","EnvType":"string","EnvValue":"","EnvDescription":"To pass the resource of app sync","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"chart-sync","EnvDescription":"Service account to be used in app sync Job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SHUTDOWN_WAIT_DURATION","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_AUTO_SYNC_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled all argocd application will have auto sync enabled","Example":"true","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_COUNT_ON_CONFLICT","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_DELAY_ON_CONFLICT","EnvType":"int","EnvValue":"1","EnvDescription":"Delay on retrying the manifest commit on gitops","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_COUNT","EnvType":"int","EnvValue":"4","EnvDescription":"Retry count for registering a GitOps repository to ArgoCD","Example":"3","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay (in Seconds) between the retries for registering a GitOps repository to ArgoCD","Example":"5","Deprecated":"false"},{"Env":"BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"there is a feature to get URLs of services/ingresses. so to extract those, we need to parse all the service and ingress objects of the application. 
this BATCH_SIZE flag controls the number of these objects parsed in one go.","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host for the devtron stack","Example":"","Deprecated":"false"},{"Env":"CD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PORT","EnvType":"string","EnvValue":"8000","EnvDescription":"Port for pre/post-cd","Example":"","Deprecated":"false"},{"Env":"CExpirationTime","EnvType":"int","EnvValue":"600","EnvDescription":"Caching expiration time.","Example":"","Deprecated":"false"},{"Env":"CI_TRIGGER_CRON_TIME","EnvType":"int","EnvValue":"2","EnvDescription":"For image poll plugin","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_STATUS_UPDATE_CRON","EnvType":"string","EnvValue":"*/5 * * * *","EnvDescription":"Cron schedule for CI pipeline status","Example":"","Deprecated":"false"},{"Env":"CLI_CMD_TIMEOUT_GLOBAL_SECONDS","EnvType":"int","EnvValue":"0","EnvDescription":"Used in git cli operation timeout","Example":"","Deprecated":"false"},{"Env":"CLUSTER_STATUS_CRON_TIME","EnvType":"int","EnvValue":"15","EnvDescription":"Cron schedule for cluster status on resource browser","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TIMEOUT","EnvType":"float64","EnvValue":"3600","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEVTRON_BOM_URL","EnvType":"string","EnvValue":"https://raw.githubusercontent.com/devtron-labs/devtron/%s/charts/devtron/devtron-bom.yaml","EnvDescription":"Path to devtron-bom.yaml of devtron charts, used for module installation and devtron upgrade","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEX_SECRET_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of dex secret","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_CHART_NAME","EnvType":"string","EnvValue":"devtron-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Name of the Devtron Helm release. 
","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of the Devtron Helm release","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Is used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_URL","EnvType":"string","EnvValue":"https://helm.devtron.ai","EnvDescription":"Is used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLATION_TYPE","EnvType":"string","EnvValue":"","EnvDescription":"Devtron Installation type(EA/Full)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_MODULES_PATH","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"Path to devtron installer modules, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_RELEASE_PATH","EnvType":"string","EnvValue":"installer.release","EnvDescription":"Path to devtron installer release, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_MODULES_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_OPERATOR_BASE_PATH","EnvType":"string","EnvValue":"","EnvDescription":"Base path for devtron operator, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_SECRET_NAME","EnvType":"string","EnvValue":"devtron-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_VERSION_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.release","EnvDescription":"devtron operator version identifier in helm values yaml","Example":"","Deprecated":"false"},{"Env":"DEX_CID","EnvType":"string","EnvValue":"example-app","EnvDescription":"dex client id ","Example":"","Deprecated":"false"},{"Env":"DEX_CLIENT_ID","EnvType":"string","EnvValue":"argo-cd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CSTOREKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX CSTOREKEY.","Example":"","Deprecated":"false"},{"Env":"DEX_JWTKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX JWT key. 
","Example":"","Deprecated":"false"},{"Env":"DEX_RURL","EnvType":"string","EnvValue":"http://127.0.0.1:8080/callback","EnvDescription":"Dex redirect URL(http://argocd-dex-server.devtroncd:8080/callback)","Example":"","Deprecated":"false"},{"Env":"DEX_SCOPES","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_SECRET","EnvType":"string","EnvValue":"","EnvDescription":"Dex secret","Example":"","Deprecated":"false"},{"Env":"DEX_URL","EnvType":"string","EnvValue":"","EnvDescription":"Dex service endpoint with dex path(http://argocd-dex-server.devtroncd:5556/dex)","Example":"","Deprecated":"false"},{"Env":"ECR_REPO_NAME_PREFIX","EnvType":"string","EnvValue":"test/","EnvDescription":"Prefix for ECR repo to be created in does not exist","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of no-gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_LINKED_CI_ARTIFACT_COPY","EnvType":"bool","EnvValue":"false","EnvDescription":"Enable copying artifacts from parent CI pipeline to linked CI pipeline during creation","Example":"","Deprecated":"false"},{"Env":"ENABLE_PASSWORD_ENCRYPTION","EnvType":"bool","EnvValue":"true","EnvDescription":"enable password encryption","Example":"","Deprecated":"false"},{"Env":"EPHEMERAL_SERVER_VERSION_REGEX","EnvType":"string","EnvValue":"v[1-9]\\.\\b(2[3-9]\\|[3-9][0-9])\\b.*","EnvDescription":"ephemeral containers support version regex that is compared with k8sServerVersion","Example":"","Deprecated":"false"},{"Env":"EVENT_URL","EnvType":"string","EnvValue":"http://localhost:3000/notify","EnvDescription":"Notifier service url","Example":"","Deprecated":"false"},{"Env":"EXECUTE_WIRE_NIL_CHECKER","EnvType":"bool","EnvValue":"false","EnvDescription":"checks for any nil pointer in wire.go","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CI_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"To expose CI metrics","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"restart workload retrieval batch size ","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_WORKER_POOL_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"restart workload retrieval pool size","Example":"","Deprecated":"false"},{"Env":"FORCE_SECURITY_SCANNING","EnvType":"bool","EnvValue":"false","EnvDescription":"By enabling this no one can disable image scaning on ci-pipeline from UI","Example":"","Deprecated":"false"},{"Env":"GITHUB_ORG_NAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_USERNAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_REPO_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for Gitops repo being creation for argocd application","Example":"","Deprecated":"false"},{"Env":"GO_RUNTIME_ENV","EnvType":"string","EnvValue":"production","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host URL for the grafana 
dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace for grafana","Example":"","Deprecated":"false"},{"Env":"GRAFANA_ORG_ID","EnvType":"int","EnvValue":"2","EnvDescription":"Org ID for grafana for application metrics","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PASSWORD","EnvType":"string","EnvValue":"prom-operator","EnvDescription":"Password for grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PORT","EnvType":"string","EnvValue":"8090","EnvDescription":"Port for grafana micro-service","Example":"","Deprecated":"false"},{"Env":"GRAFANA_URL","EnvType":"string","EnvValue":"","EnvDescription":"Host URL for the grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"Username for grafana ","Example":"","Deprecated":"false"},{"Env":"HIDE_API_TOKENS","EnvType":"bool","EnvValue":"false","EnvDescription":"Boolean flag for should the api tokens generated be hidden from the UI","Example":"","Deprecated":"false"},{"Env":"HIDE_IMAGE_TAGGING_HARD_DELETE","EnvType":"bool","EnvValue":"false","EnvDescription":"Flag to hide the hard delete option in the image tagging service","Example":"","Deprecated":"false"},{"Env":"IGNORE_AUTOCOMPLETE_AUTH_CHECK","EnvType":"bool","EnvValue":"false","EnvDescription":"flag for ignoring auth check in autocomplete apis.","Example":"","Deprecated":"false"},{"Env":"INSTALLED_MODULES","EnvType":"","EnvValue":"","EnvDescription":"List of installed modules given in helm values/yaml are written in cm and used by devtron to know which modules are given","Example":"security.trivy,security.clair","Deprecated":"false"},{"Env":"INSTALLER_CRD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"namespace where Custom Resource Definitions get installed","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_GROUP_NAME","EnvType":"string","EnvValue":"installer.devtron.ai","EnvDescription":"Devtron installer CRD group name, partially deprecated.","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_RESOURCE","EnvType":"string","EnvValue":"installers","EnvDescription":"Devtron installer CRD resource name, partially deprecated","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_VERSION","EnvType":"string","EnvValue":"v1alpha1","EnvDescription":"version of the CRDs. 
default is v1alpha1","Example":"","Deprecated":"false"},{"Env":"IS_AIR_GAP_ENVIRONMENT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"JwtExpirationTime","EnvType":"int","EnvValue":"120","EnvDescription":"JWT expiration time.","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Lens microservice timeout.","Example":"","Deprecated":"false"},{"Env":"LENS_URL","EnvType":"string","EnvValue":"http://lens-milandevtron-service:80","EnvDescription":"Lens micro-service URL","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LINKED_CI_ARTIFACT_COPY_LIMIT","EnvType":"int","EnvValue":"10","EnvDescription":"Maximum number of artifacts to copy from parent CI pipeline to linked CI pipeline","Example":"","Deprecated":"false"},{"Env":"LOGGER_DEV_MODE","EnvType":"bool","EnvValue":"false","EnvDescription":"Enables a different logger theme.","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_SESSION_PER_USER","EnvType":"int","EnvValue":"5","EnvDescription":"max number of cluster terminal pods that can be created by a user","Example":"","Deprecated":"false"},{"Env":"MODULE_METADATA_API_URL","EnvType":"string","EnvValue":"https://api.devtron.ai/module?name=%s","EnvDescription":"Modules list and meta info will be fetched from this server, that is the central api server of devtron.","Example":"","Deprecated":"false"},{"Env":"MODULE_STATUS_HANDLING_CRON_DURATION_MIN","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NOTIFICATION_MEDIUM","EnvType":"NotificationMedium","EnvValue":"rest","EnvDescription":"notification medium","Example":"","Deprecated":"false"},{"Env":"OTEL_COLLECTOR_URL","EnvType":"string","EnvValue":"","EnvDescription":"Opentelemetry URL ","Example":"","Deprecated":"false"},{"Env":"PARALLELISM_LIMIT_FOR_TAG_PROCESSING","EnvType":"int","EnvValue":"","EnvDescription":"App manual sync job parallel tag processing 
count.","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PLUGIN_NAME","EnvType":"string","EnvValue":"Pull images from container repository","EnvDescription":"Handles image retrieval from a container repository and triggers subsequent CI processes upon detecting new images.Current default plugin name: Pull Images from Container Repository.","Example":"","Deprecated":"false"},{"Env":"PROPAGATE_EXTRA_LABELS","EnvType":"bool","EnvValue":"false","EnvDescription":"Add additional propagate labels like api.devtron.ai/appName, api.devtron.ai/envName, api.devtron.ai/project along with the user defined ones.","Example":"","Deprecated":"false"},{"Env":"PROXY_SERVICE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"Proxy configuration for micro-service to be accessible on orhcestrator ingress","Example":"","Deprecated":"false"},{"Env":"REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESTRICT_TERMINAL_ACCESS_FOR_NON_SUPER_USER","EnvType":"bool","EnvValue":"false","EnvDescription":"To restrict the cluster terminal from user having non-super admin acceess","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"LocalDevMode","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable scoped variable option","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_FORMAT","EnvType":"string","EnvValue":"@{{%s}}","EnvDescription":"Its a scope format for varialbe name.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_HANDLE_PRIMITIVES","EnvType":"bool","EnvValue":"false","EnvDescription":"This describe should we handle primitives or not in scoped variable template parsing.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_NAME_REGEX","EnvType":"string","EnvValue":"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$","EnvDescription":"Regex for scoped variable name that must passed this regex.","Example":"","Deprecated":"false"},{"Env":"SOCKET_DISCONNECT_DELAY_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"The server closes a session when a client receiving connection have not been seen for a while.This delay is configured by this setting. By default the session is closed when a receiving connection wasn't seen for 5 seconds.","Example":"","Deprecated":"false"},{"Env":"SOCKET_HEARTBEAT_SECONDS","EnvType":"int","EnvValue":"25","EnvDescription":"In order to keep proxies and load balancers from closing long running http requests we need to pretend that the connection is active and send a heartbeat packet once in a while. This setting controls how often this is done. 
By default a heartbeat packet is sent every 25 seconds.","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SYSTEM_VAR_PREFIX","EnvType":"string","EnvValue":"DEVTRON_","EnvDescription":"Scoped variable prefix, variable name must have this prefix.","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"default","EnvDescription":"Cluster terminal default namespace","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_INACTIVE_DURATION_IN_MINS","EnvType":"int","EnvValue":"10","EnvDescription":"Timeout for cluster terminal to be inactive","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_STATUS_SYNC_In_SECS","EnvType":"int","EnvValue":"600","EnvDescription":"this is the time interval at which the status of the cluster terminal pod is synced","Example":"","Deprecated":"false"},{"Env":"TEST_APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_LOG_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PASSWORD","EnvType":"string","EnvValue":"postgrespw","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PORT","EnvType":"string","EnvValue":"55000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_FOR_FAILED_CI_BUILD","EnvType":"string","EnvValue":"15","EnvDescription":"Timeout for Failed CI build ","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_IN_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"timeout to compute the urls from services and ingress objects of an application","Example":"","Deprecated":"false"},{"Env":"USER_SESSION_DURATION_SECONDS","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_API_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 API for listing artifacts when listing the images in pipeline","Example":"","Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_GIT_CLI","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable git cli","Example":"","Deprecated":"false"},{"Env":"USE_RBAC_CREATION_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 for RBAC creation","Example":"","Deprecated":"false"},{"Env":"VARIABLE_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"This is used to control caching of all the scope variables defined in the system.","Example":"","Deprecated":"false"},{"Env":"VARIABLE_EXPRESSION_REGEX","EnvType":"string","EnvValue":"@{{([^}]+)}}","EnvDescription":"Scoped variable expression regex","Example":"","Deprecated":"false"},{"Env":"WEBHOOK_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"If you want to continue using jenkins for CI then please provide this for authentication of 
requests","Example":"","Deprecated":"false"}]},{"Category":"GITOPS","Fields":[{"Env":"ACD_CM","EnvType":"string","EnvValue":"argocd-cm","EnvDescription":"Name of the argocd CM","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"To pass the argocd namespace","Example":"","Deprecated":"false"},{"Env":"ACD_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"Password for the Argocd (deprecated)","Example":"","Deprecated":"false"},{"Env":"ACD_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"User name for argocd","Example":"","Deprecated":"false"},{"Env":"GITOPS_SECRET_NAME","EnvType":"string","EnvValue":"devtron-gitops-secret","EnvDescription":"devtron-gitops-secret","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS","EnvType":"string","EnvValue":"Deployment,Rollout,StatefulSet,ReplicaSet","EnvDescription":"this holds the list of k8s resource names which support replicas key. this list used in hibernate/un hibernate process","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS_BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"this the batch size to control no of above resources can be parsed in one go to determine hibernate status","Example":"","Deprecated":"false"}]},{"Category":"INFRA_SETUP","Fields":[{"Env":"DASHBOARD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Dashboard micro-service URL","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Dashboard micro-service namespace","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_PORT","EnvType":"string","EnvValue":"3000","EnvDescription":"Port for dashboard micro-service","Example":"","Deprecated":"false"},{"Env":"DEX_HOST","EnvType":"string","EnvValue":"http://localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_PORT","EnvType":"string","EnvValue":"5556","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_PROTOCOL","EnvType":"string","EnvValue":"REST","EnvDescription":"Protocol to connect with git-sensor micro-service","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"pick_first\"}","EnvDescription":"git-sensor grpc service config","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Timeout for getting response from the git-sensor","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_URL","EnvType":"string","EnvValue":"127.0.0.1:7070","EnvDescription":"git-sensor micro-service url ","Example":"","Deprecated":"false"},{"Env":"HELM_CLIENT_URL","EnvType":"string","EnvValue":"127.0.0.1:50051","EnvDescription":"Kubelink micro-service url ","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_RECEIVE_MSG_SIZE","EnvType":"int","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_SEND_MSG_SIZE","EnvType":"int","EnvValue":"4","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"round_robin\"}","EnvDescription":"kubelink grpc service config","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"Application 
name","Example":"","Deprecated":"false"},{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"Database for casbin","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"address of postgres service","Example":"postgresql-postgresql.devtroncd","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"postgres database to be made connection with","Example":"orchestrator, casbin, git_sensor, lens","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"{password}","EnvDescription":"password for postgres, associated with PG_USER","Example":"confidential ;)","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"port of postgresql service","Example":"5432","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for read operation in postgres","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"user for postgres","Example":"postgres","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for write operation in postgres","Example":"","Deprecated":"false"}]},{"Category":"RBAC","Fields":[{"Env":"ENFORCER_CACHE","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable enforcer cache.","Example":"","Deprecated":"false"},{"Env":"ENFORCER_CACHE_EXPIRATION_IN_SEC","EnvType":"int","EnvValue":"86400","EnvDescription":"Expiration time (in seconds) for enforcer cache. ","Example":"","Deprecated":"false"},{"Env":"ENFORCER_MAX_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"Maximum batch size for the enforcer.","Example":"","Deprecated":"false"},{"Env":"USE_CASBIN_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable casbin V2 API","Example":"","Deprecated":"false"}]}] \ No newline at end of file +[{"Category":"CD","Fields":[{"Env":"ARGO_APP_MANUAL_SYNC_TIME","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"CD_FLUX_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time to check the pipeline status for flux cd pipeline","Example":"","Deprecated":"false"},{"Env":"CD_HELM_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time to check the pipeline status ","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time for CD pipeline status","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_TIMEOUT_DURATION","EnvType":"string","EnvValue":"20","EnvDescription":"Timeout for CD pipeline to get healthy","Example":"","Deprecated":"false"},{"Env":"DEPLOY_STATUS_CRON_GET_PIPELINE_DEPLOYED_WITHIN_HOURS","EnvType":"int","EnvValue":"12","EnvDescription":"This flag is used to fetch the deployment status of the application. It retrieves the status of deployments that occurred between 12 hours and 10 minutes prior to the current time. 
It fetches non-terminal statuses.","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"1","EnvDescription":"Context timeout for gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"6","EnvDescription":"Context timeout for no gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CD_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable migration of external argocd application to devtron pipeline","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_FLUX_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable flux application services","Example":"","Deprecated":"false"},{"Env":"FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking flux app status periodically and update in db, value is in seconds, default is 120, if wfr is updated within configured time i.e. FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking helm app status periodically and update in db, value is in seconds, default is 120, if wfr is updated within configured time i.e. HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"IS_INTERNAL_USE","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled then cd pipeline and helm apps will not need the deployment app type mandatorily. 
Couple this flag with HIDE_GITOPS_OR_HELM_OPTION (in Dashboard) and if gitops is configured and allowed for the env, pipeline/helm app will use gitops, else no-gitops.","Example":"","Deprecated":"false"},{"Env":"MIGRATE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"migrate deployment config data from charts table to deployment_config table","Example":"","Deprecated":"false"},{"Env":"PIPELINE_DEGRADED_TIME","EnvType":"string","EnvValue":"10","EnvDescription":"Time to mark a pipeline degraded if not healthy in defined time","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_DEVTRON_APP","EnvType":"int","EnvValue":"1","EnvDescription":"Count for devtron application revision history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_EXTERNAL_HELM_APP","EnvType":"int","EnvValue":"0","EnvDescription":"Count for external helm application revision history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_HELM_APP","EnvType":"int","EnvValue":"1","EnvDescription":"To set the history limit for the helm app being deployed through devtron","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_LINKED_HELM_APP","EnvType":"int","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOULD_CHECK_NAMESPACE_ON_CLONE","EnvType":"bool","EnvValue":"false","EnvDescription":"should we check if namespace exists or not while cloning app","Example":"","Deprecated":"false"},{"Env":"USE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"use deployment config data from deployment_config table","Example":"","Deprecated":"true"},{"Env":"VALIDATE_EXT_APP_CHART_TYPE","EnvType":"bool","EnvValue":"false","EnvDescription":"validate external flux app chart","Example":"","Deprecated":"false"}]},{"Category":"CI_BUILDX","Fields":[{"Env":"ASYNC_BUILDX_CACHE_EXPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async container image cache export","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_MODE_MIN","EnvType":"bool","EnvValue":"false","EnvDescription":"To set build cache mode to minimum in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_INTERRUPTION_MAX_RETRY","EnvType":"int","EnvValue":"3","EnvDescription":"Maximum number of retries for buildx builder interruption","Example":"","Deprecated":"false"}]},{"Category":"CI_RUNNER","Fields":[{"Env":"AZURE_ACCOUNT_KEY","EnvType":"string","EnvValue":"","EnvDescription":"If blob storage is being used of azure then pass the secret key to access the bucket","Example":"","Deprecated":"false"},{"Env":"AZURE_ACCOUNT_NAME","EnvType":"string","EnvValue":"","EnvDescription":"Account name for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_CACHE","EnvType":"string","EnvValue":"","EnvDescription":"Cache bucket name for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_LOG","EnvType":"string","EnvValue":"","EnvDescription":"Log bucket for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_CONNECTION_INSECURE","EnvType":"bool","EnvValue":"true","EnvDescription":"Azure gateway connection allows insecure if true","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_URL","EnvType":"string","EnvValue":"http://devtron-minio.devtroncd:9000","EnvDescription":"Sent to CI runner for 
blob","Example":"","Deprecated":"false"},{"Env":"BASE_LOG_LOCATION_PATH","EnvType":"string","EnvValue":"/home/devtron/","EnvDescription":"Used to store, download logs of ci workflow, artifact","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_GCP_CREDENTIALS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"GCP cred json for GCS blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_PROVIDER","EnvType":"","EnvValue":"S3","EnvDescription":"Blob storage provider name(AWS/GCP/Azure)","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ACCESS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"S3 access key for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_BUCKET_VERSIONED","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable buctet versioning for blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT","EnvType":"string","EnvValue":"","EnvDescription":"S3 endpoint URL for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT_INSECURE","EnvType":"bool","EnvValue":"false","EnvDescription":"To use insecure s3 endpoint","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_SECRET_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Secret key for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/devtron/buildx","EnvDescription":"Path for the buildx cache","Example":"","Deprecated":"false"},{"Env":"BUILDX_K8S_DRIVER_OPTIONS","EnvType":"string","EnvValue":"","EnvDescription":"To enable the k8s driver and pass args for k8s driver in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_PROVENANCE_MODE","EnvType":"string","EnvValue":"","EnvDescription":"provinance is set to true by default by docker. this will add some build related data in generated build manifest.it also adds some unknown:unknown key:value pair which may not be compatible by some container registries. 
with buildx k8s driver, provenance=true causes issues when pushing the manifest to the quay registry, so it is set to false","Example":"","Deprecated":"false"},{"Env":"BUILD_LOG_TTL_VALUE_IN_SECS","EnvType":"int","EnvValue":"3600","EnvDescription":"This is the time that the pods of ci/pre-cd/post-cd live after completion state.","Example":"","Deprecated":"false"},{"Env":"CACHE_LIMIT","EnvType":"int64","EnvValue":"5000000000","EnvDescription":"Cache limit.","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for Pre/Post cd ","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"Toleration key for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"Toleration value for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Request Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Request Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for Pre/Post CD(AWF,System)","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"cd-runner","EnvDescription":"Service account to be used in Pre/Post CD pod","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for CI","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CI","Example":"","Deprecated":"false"},{"Env":"CI_IGNORE_DOCKER_CACHE","EnvType":"bool","EnvValue":"","EnvDescription":"Ignoring docker cache ","Example":"","Deprecated":"false"},{"Env":"CI_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for build logs","Example":"","Deprecated":"false"},{"Env":"CI_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Toleration key for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"","EnvDescription":"Toleration value for CI","Example":"","Deprecated":"false"},{"Env":"CI_RUNNER_DOCKER_MTU_VALUE","EnvType":"int","EnvValue":"-1","EnvDescription":"this is to control the bytes of information passed in a network packet in ci-runner. 
default is -1 (defaults to the underlying node mtu value)","Example":"","Deprecated":"false"},{"Env":"CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"this is to control the number of linked pipelines that should be handled in one go when a ci-success event of a parent ci is received","Example":"","Deprecated":"false"},{"Env":"CI_VOLUME_MOUNTS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"additional volume mount data for CI and JOB","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for CI(AWF,System)","Example":"","Deprecated":"false"},{"Env":"DEFAULT_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"arsenal-v1/ci-artifacts","EnvDescription":"Key location for artifacts being created","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_BUCKET","EnvType":"string","EnvValue":"devtron-pro-ci-logs","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"arsenal-v1","EnvDescription":"Bucket prefix for build logs","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET","EnvType":"string","EnvValue":"ci-caching","EnvDescription":"Bucket name for build cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"Build Cache bucket region","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"","EnvDescription":"Bucket prefix for build cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_LOGS_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"Namespace for devtron stack","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_TIMEOUT","EnvType":"int64","EnvValue":"3600","EnvDescription":"Timeout for Pre/Post-Cd to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CI_IMAGE","EnvType":"string","EnvValue":"686244538589.dkr.ecr.us-east-2.amazonaws.com/cirunner:47","EnvDescription":"To pass the ci-runner image","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TARGET_PLATFORM","EnvType":"string","EnvValue":"","EnvDescription":"Default architecture for buildx","Example":"","Deprecated":"false"},{"Env":"DOCKER_BUILD_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/docker","EnvDescription":"Path to store cache of docker build (/var/lib/docker-\u003e for legacy docker build, /var/lib/devtron-\u003e for buildx)","Example":"","Deprecated":"false"},{"Env":"ENABLE_BUILD_CONTEXT","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable build context in Devtron.","Example":"","Deprecated":"false"},{"Env":"ENABLE_WORKFLOW_EXECUTION_STAGE","EnvType":"bool","EnvValue":"true","EnvDescription":"if enabled then we will display build stages separately for CI/Job/Pre-Post CD","Example":"true","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_CM_NAME","EnvType":"string","EnvValue":"blob-storage-cm","EnvDescription":"name of the config map(contains bucket name, etc.) 
in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_SECRET_NAME","EnvType":"string","EnvValue":"blob-storage-secret","EnvDescription":"name of the secret(contains password, accessId,passKeys, etc.) in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"This is an array of strings used when submitting a workflow for pre or post-CD execution. If the ","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_API_SECRET","EnvType":"string","EnvValue":"devtroncd-secret","EnvDescription":"External CI API secret.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_PAYLOAD","EnvType":"string","EnvValue":"{\"ciProjectDetails\":[{\"gitRepository\":\"https://github.com/vikram1601/getting-started-nodejs.git\",\"checkoutPath\":\"./abc\",\"commitHash\":\"239077135f8cdeeccb7857e2851348f558cb53d3\",\"commitTime\":\"2022-10-30T20:00:00\",\"branch\":\"master\",\"message\":\"Update README.md\",\"author\":\"User Name \"}],\"dockerImage\":\"445808685819.dkr.ecr.us-east-2.amazonaws.com/orch:23907713-2\"}","EnvDescription":"External CI payload with project details.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_WEB_HOOK_URL","EnvType":"string","EnvValue":"","EnvDescription":"default is {{HOST_URL}}/orchestrator/webhook/ext-ci. It is used for external ci.","Example":"","Deprecated":"false"},{"Env":"IGNORE_CM_CS_IN_CI_JOB","EnvType":"bool","EnvValue":"false","EnvDescription":"Ignore CM/CS in CI-pipeline as Job","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_COUNT","EnvType":"int","EnvValue":"0","EnvDescription":"push artifact(image) in ci retry count ","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_INTERVAL","EnvType":"int","EnvValue":"5","EnvDescription":"image retry interval takes value in seconds","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCANNER_ENDPOINT","EnvType":"string","EnvValue":"http://image-scanner-new-demo-devtroncd-service.devtroncd:80","EnvDescription":"Image-scanner micro-service URL","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_MAX_RETRIES","EnvType":"int","EnvValue":"3","EnvDescription":"Max retry count for image-scanning","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay for the image-scanning to start","Example":"","Deprecated":"false"},{"Env":"IN_APP_LOGGING_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"Used in case of argo workflow is enabled. 
If enabled logs push will be managed by us, else will be managed by argo workflow.","Example":"","Deprecated":"false"},{"Env":"MAX_CD_WORKFLOW_RUNNER_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum number of times pre/post-cd-workflow will create a pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MAX_CI_WORKFLOW_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum number of times CI-workflow will create a pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MODE","EnvType":"string","EnvValue":"DEV","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_SERVER_HOST","EnvType":"string","EnvValue":"localhost:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_HOST","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd/webhook/msg/nats","EnvDescription":"Orchestrator micro-service URL ","Example":"","Deprecated":"false"},{"Env":"ORCH_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"Orchestrator token","Example":"","Deprecated":"false"},{"Env":"PRE_CI_CACHE_PATH","EnvType":"string","EnvValue":"/devtroncd-cache","EnvDescription":"Cache path for Pre CI tasks","Example":"","Deprecated":"false"},{"Env":"SHOW_DOCKER_BUILD_ARGS","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable showing the args passed for CI in build logs","Example":"","Deprecated":"false"},{"Env":"SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL","EnvType":"bool","EnvValue":"false","EnvDescription":"To skip cache Push/Pull for ci job","Example":"","Deprecated":"false"},{"Env":"SKIP_CREATING_ECR_REPO","EnvType":"bool","EnvValue":"false","EnvDescription":"By enabling this, ECR repo won't get created if it's not available on ECR from build configuration","Example":"","Deprecated":"false"},{"Env":"TERMINATION_GRACE_PERIOD_SECS","EnvType":"int","EnvValue":"180","EnvDescription":"this is the time given to workflow pods to shut down (graceful termination time)","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_QUERY_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 query for listing artifacts","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CD_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post cd","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CI_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post ci","Example":"","Deprecated":"false"},{"Env":"USE_BUILDX","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable buildx feature globally","Example":"","Deprecated":"false"},{"Env":"USE_DOCKER_API_TO_GET_DIGEST","EnvType":"bool","EnvValue":"false","EnvDescription":"when the user does not pass the digest, this flag controls whether the image digest is found using the docker API or not. if set to true we get the digest from a docker API call, else we use the docker pull command. [logic in ci-runner]","Example":"","Deprecated":"false"},{"Env":"USE_EXTERNAL_NODE","EnvType":"bool","EnvValue":"false","EnvDescription":"It is used in case of Pre/ Post Cd with run in application mode. 
If enabled, the node labels are read from EXTERNAL_CD_NODE_LABEL_SELECTOR, else from CD_NODE_LABEL_SELECTOR. MODE: if the value is DEV, the local kube config file is read; otherwise the config is read from the cluster location.","Example":"","Deprecated":"false"},{"Env":"USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD","EnvType":"bool","EnvValue":"false","EnvDescription":"To use the same tag in the container image as that of the git tag","Example":"","Deprecated":"false"},{"Env":"WF_CONTROLLER_INSTANCE_ID","EnvType":"string","EnvValue":"devtron-runner","EnvDescription":"Workflow controller instance ID.","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_CACHE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"flag is used to configure how Docker caches are handled during a CI/CD ","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"ci-runner","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"-","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ADDITIONAL_NODE_GROUP_LABELS","EnvType":"","EnvValue":"","EnvDescription":"Comma-separated list of additional node group labels to add to the default labels","Example":"karpenter.sh/nodepool,cloud.google.com/gke-nodepool","Deprecated":"false"},{"Env":"APP_SYNC_IMAGE","EnvType":"string","EnvValue":"quay.io/devtron/chart-sync:1227622d-132-3775","EnvDescription":"App sync image; this image will be used in the app manual-sync job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_JOB_RESOURCES_OBJ","EnvType":"string","EnvValue":"","EnvDescription":"To pass the resources for the app sync job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"chart-sync","EnvDescription":"Service account to be used in the app sync job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SHUTDOWN_WAIT_DURATION","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_AUTO_SYNC_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled, all argocd applications will have auto sync enabled","Example":"true","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_COUNT_ON_CONFLICT","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_DELAY_ON_CONFLICT","EnvType":"int","EnvValue":"1","EnvDescription":"Delay on retrying the manifest commit on GitOps","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_COUNT","EnvType":"int","EnvValue":"4","EnvDescription":"Retry count for registering a GitOps repository to ArgoCD","Example":"3","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay (in seconds) between the retries for registering a GitOps repository to ArgoCD","Example":"5","Deprecated":"false"},{"Env":"BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"there is a feature to get URLs of services/ingresses; to extract those, we need to parse all the service and ingress objects of the application.
This BATCH_SIZE flag controls the number of these objects parsed in one go.","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host for the devtron stack","Example":"","Deprecated":"false"},{"Env":"CD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PORT","EnvType":"string","EnvValue":"8000","EnvDescription":"Port for pre/post-cd","Example":"","Deprecated":"false"},{"Env":"CExpirationTime","EnvType":"int","EnvValue":"600","EnvDescription":"Caching expiration time.","Example":"","Deprecated":"false"},{"Env":"CI_TRIGGER_CRON_TIME","EnvType":"int","EnvValue":"2","EnvDescription":"For image poll plugin","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_STATUS_UPDATE_CRON","EnvType":"string","EnvValue":"*/5 * * * *","EnvDescription":"Cron schedule for CI pipeline status","Example":"","Deprecated":"false"},{"Env":"CLI_CMD_TIMEOUT_GLOBAL_SECONDS","EnvType":"int","EnvValue":"0","EnvDescription":"Used as the git CLI operation timeout","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_BACKGROUND_REFRESH_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"Enable background refresh of cluster overview cache","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"Enable caching for cluster overview data","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_MAX_PARALLEL_CLUSTERS","EnvType":"int","EnvValue":"15","EnvDescription":"Maximum number of clusters to fetch in parallel during refresh","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_MAX_STALE_DATA_SECONDS","EnvType":"int","EnvValue":"30","EnvDescription":"Maximum age of cached data in seconds before warning","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_REFRESH_INTERVAL_SECONDS","EnvType":"int","EnvValue":"15","EnvDescription":"Background cache refresh interval in seconds","Example":"","Deprecated":"false"},{"Env":"CLUSTER_STATUS_CRON_TIME","EnvType":"int","EnvValue":"15","EnvDescription":"Cron schedule for cluster status on resource browser","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TIMEOUT","EnvType":"float64","EnvValue":"3600","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEVTRON_BOM_URL","EnvType":"string","EnvValue":"https://raw.githubusercontent.com/devtron-labs/devtron/%s/charts/devtron/devtron-bom.yaml","EnvDescription":"Path to devtron-bom.yaml of devtron charts, used for module installation and devtron upgrade","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEX_SECRET_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of dex secret","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_CHART_NAME","EnvType":"string","EnvValue":"devtron-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Name of the 
Devtron Helm release.","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of the Devtron Helm release","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_URL","EnvType":"string","EnvValue":"https://helm.devtron.ai","EnvDescription":"Used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLATION_TYPE","EnvType":"string","EnvValue":"","EnvDescription":"Devtron installation type (EA/Full)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_MODULES_PATH","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"Path to devtron installer modules, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_RELEASE_PATH","EnvType":"string","EnvValue":"installer.release","EnvDescription":"Path to devtron installer release, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_MODULES_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_OPERATOR_BASE_PATH","EnvType":"string","EnvValue":"","EnvDescription":"Base path for devtron operator, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_SECRET_NAME","EnvType":"string","EnvValue":"devtron-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_VERSION_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.release","EnvDescription":"devtron operator version identifier in helm values yaml","Example":"","Deprecated":"false"},{"Env":"DEX_CID","EnvType":"string","EnvValue":"example-app","EnvDescription":"Dex client ID","Example":"","Deprecated":"false"},{"Env":"DEX_CLIENT_ID","EnvType":"string","EnvValue":"argo-cd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CSTOREKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX CSTOREKEY.","Example":"","Deprecated":"false"},{"Env":"DEX_JWTKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX JWT key.
","Example":"","Deprecated":"false"},{"Env":"DEX_RURL","EnvType":"string","EnvValue":"http://127.0.0.1:8080/callback","EnvDescription":"Dex redirect URL(http://argocd-dex-server.devtroncd:8080/callback)","Example":"","Deprecated":"false"},{"Env":"DEX_SCOPES","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_SECRET","EnvType":"string","EnvValue":"","EnvDescription":"Dex secret","Example":"","Deprecated":"false"},{"Env":"DEX_URL","EnvType":"string","EnvValue":"","EnvDescription":"Dex service endpoint with dex path(http://argocd-dex-server.devtroncd:5556/dex)","Example":"","Deprecated":"false"},{"Env":"ECR_REPO_NAME_PREFIX","EnvType":"string","EnvValue":"test/","EnvDescription":"Prefix for ECR repo to be created in does not exist","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of no-gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_LINKED_CI_ARTIFACT_COPY","EnvType":"bool","EnvValue":"false","EnvDescription":"Enable copying artifacts from parent CI pipeline to linked CI pipeline during creation","Example":"","Deprecated":"false"},{"Env":"ENABLE_PASSWORD_ENCRYPTION","EnvType":"bool","EnvValue":"true","EnvDescription":"enable password encryption","Example":"","Deprecated":"false"},{"Env":"EPHEMERAL_SERVER_VERSION_REGEX","EnvType":"string","EnvValue":"v[1-9]\\.\\b(2[3-9]\\|[3-9][0-9])\\b.*","EnvDescription":"ephemeral containers support version regex that is compared with k8sServerVersion","Example":"","Deprecated":"false"},{"Env":"EVENT_URL","EnvType":"string","EnvValue":"http://localhost:3000/notify","EnvDescription":"Notifier service url","Example":"","Deprecated":"false"},{"Env":"EXECUTE_WIRE_NIL_CHECKER","EnvType":"bool","EnvValue":"false","EnvDescription":"checks for any nil pointer in wire.go","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CI_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"To expose CI metrics","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"restart workload retrieval batch size ","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_WORKER_POOL_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"restart workload retrieval pool size","Example":"","Deprecated":"false"},{"Env":"FORCE_SECURITY_SCANNING","EnvType":"bool","EnvValue":"false","EnvDescription":"By enabling this no one can disable image scaning on ci-pipeline from UI","Example":"","Deprecated":"false"},{"Env":"GITHUB_ORG_NAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_USERNAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_REPO_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for Gitops repo being creation for argocd application","Example":"","Deprecated":"false"},{"Env":"GO_RUNTIME_ENV","EnvType":"string","EnvValue":"production","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host URL for the grafana 
dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace for grafana","Example":"","Deprecated":"false"},{"Env":"GRAFANA_ORG_ID","EnvType":"int","EnvValue":"2","EnvDescription":"Org ID for grafana for application metrics","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PASSWORD","EnvType":"string","EnvValue":"prom-operator","EnvDescription":"Password for grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PORT","EnvType":"string","EnvValue":"8090","EnvDescription":"Port for grafana micro-service","Example":"","Deprecated":"false"},{"Env":"GRAFANA_URL","EnvType":"string","EnvValue":"","EnvDescription":"Host URL for the grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"Username for grafana ","Example":"","Deprecated":"false"},{"Env":"HIDE_API_TOKENS","EnvType":"bool","EnvValue":"false","EnvDescription":"Boolean flag for should the api tokens generated be hidden from the UI","Example":"","Deprecated":"false"},{"Env":"HIDE_IMAGE_TAGGING_HARD_DELETE","EnvType":"bool","EnvValue":"false","EnvDescription":"Flag to hide the hard delete option in the image tagging service","Example":"","Deprecated":"false"},{"Env":"IGNORE_AUTOCOMPLETE_AUTH_CHECK","EnvType":"bool","EnvValue":"false","EnvDescription":"flag for ignoring auth check in autocomplete apis.","Example":"","Deprecated":"false"},{"Env":"INSTALLED_MODULES","EnvType":"","EnvValue":"","EnvDescription":"List of installed modules given in helm values/yaml are written in cm and used by devtron to know which modules are given","Example":"security.trivy,security.clair","Deprecated":"false"},{"Env":"INSTALLER_CRD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"namespace where Custom Resource Definitions get installed","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_GROUP_NAME","EnvType":"string","EnvValue":"installer.devtron.ai","EnvDescription":"Devtron installer CRD group name, partially deprecated.","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_RESOURCE","EnvType":"string","EnvValue":"installers","EnvDescription":"Devtron installer CRD resource name, partially deprecated","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_VERSION","EnvType":"string","EnvValue":"v1alpha1","EnvDescription":"version of the CRDs. 
default is v1alpha1","Example":"","Deprecated":"false"},{"Env":"IS_AIR_GAP_ENVIRONMENT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"JwtExpirationTime","EnvType":"int","EnvValue":"120","EnvDescription":"JWT expiration time.","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Lens microservice timeout.","Example":"","Deprecated":"false"},{"Env":"LENS_URL","EnvType":"string","EnvValue":"http://lens-milandevtron-service:80","EnvDescription":"Lens micro-service URL","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LINKED_CI_ARTIFACT_COPY_LIMIT","EnvType":"int","EnvValue":"10","EnvDescription":"Maximum number of artifacts to copy from parent CI pipeline to linked CI pipeline","Example":"","Deprecated":"false"},{"Env":"LOGGER_DEV_MODE","EnvType":"bool","EnvValue":"false","EnvDescription":"Enables a different logger theme.","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_SESSION_PER_USER","EnvType":"int","EnvValue":"5","EnvDescription":"maximum number of cluster terminal pods that can be created by a user","Example":"","Deprecated":"false"},{"Env":"MODULE_METADATA_API_URL","EnvType":"string","EnvValue":"https://api.devtron.ai/module?name=%s","EnvDescription":"Modules list and meta info will be fetched from this server, i.e. the central API server of devtron.","Example":"","Deprecated":"false"},{"Env":"MODULE_STATUS_HANDLING_CRON_DURATION_MIN","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NOTIFICATION_MEDIUM","EnvType":"NotificationMedium","EnvValue":"rest","EnvDescription":"notification medium","Example":"","Deprecated":"false"},{"Env":"OTEL_COLLECTOR_URL","EnvType":"string","EnvValue":"","EnvDescription":"OpenTelemetry URL","Example":"","Deprecated":"false"},{"Env":"PARALLELISM_LIMIT_FOR_TAG_PROCESSING","EnvType":"int","EnvValue":"","EnvDescription":"App manual sync job parallel tag processing 
count.","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PLUGIN_NAME","EnvType":"string","EnvValue":"Pull images from container repository","EnvDescription":"Handles image retrieval from a container repository and triggers subsequent CI processes upon detecting new images.Current default plugin name: Pull Images from Container Repository.","Example":"","Deprecated":"false"},{"Env":"PROPAGATE_EXTRA_LABELS","EnvType":"bool","EnvValue":"false","EnvDescription":"Add additional propagate labels like api.devtron.ai/appName, api.devtron.ai/envName, api.devtron.ai/project along with the user defined ones.","Example":"","Deprecated":"false"},{"Env":"PROXY_SERVICE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"Proxy configuration for micro-service to be accessible on orhcestrator ingress","Example":"","Deprecated":"false"},{"Env":"REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESTRICT_TERMINAL_ACCESS_FOR_NON_SUPER_USER","EnvType":"bool","EnvValue":"false","EnvDescription":"To restrict the cluster terminal from user having non-super admin acceess","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"LocalDevMode","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable scoped variable option","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_FORMAT","EnvType":"string","EnvValue":"@{{%s}}","EnvDescription":"Its a scope format for varialbe name.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_HANDLE_PRIMITIVES","EnvType":"bool","EnvValue":"false","EnvDescription":"This describe should we handle primitives or not in scoped variable template parsing.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_NAME_REGEX","EnvType":"string","EnvValue":"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$","EnvDescription":"Regex for scoped variable name that must passed this regex.","Example":"","Deprecated":"false"},{"Env":"SOCKET_DISCONNECT_DELAY_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"The server closes a session when a client receiving connection have not been seen for a while.This delay is configured by this setting. By default the session is closed when a receiving connection wasn't seen for 5 seconds.","Example":"","Deprecated":"false"},{"Env":"SOCKET_HEARTBEAT_SECONDS","EnvType":"int","EnvValue":"25","EnvDescription":"In order to keep proxies and load balancers from closing long running http requests we need to pretend that the connection is active and send a heartbeat packet once in a while. This setting controls how often this is done. 
By default a heartbeat packet is sent every 25 seconds.","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SYSTEM_VAR_PREFIX","EnvType":"string","EnvValue":"DEVTRON_","EnvDescription":"Scoped variable prefix; variable names must have this prefix.","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"default","EnvDescription":"Cluster terminal default namespace","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_INACTIVE_DURATION_IN_MINS","EnvType":"int","EnvValue":"10","EnvDescription":"Timeout for the cluster terminal to be considered inactive","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_STATUS_SYNC_In_SECS","EnvType":"int","EnvValue":"600","EnvDescription":"the time interval at which the status of the cluster terminal pod is synced","Example":"","Deprecated":"false"},{"Env":"TEST_APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_LOG_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PASSWORD","EnvType":"string","EnvValue":"postgrespw","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PORT","EnvType":"string","EnvValue":"55000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_FOR_FAILED_CI_BUILD","EnvType":"string","EnvValue":"15","EnvDescription":"Timeout for failed CI build","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_IN_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"timeout to compute the urls from services and ingress objects of an application","Example":"","Deprecated":"false"},{"Env":"USER_SESSION_DURATION_SECONDS","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_API_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 API for listing artifact images in the pipeline","Example":"","Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_GIT_CLI","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable git cli","Example":"","Deprecated":"false"},{"Env":"USE_RBAC_CREATION_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 for RBAC creation","Example":"","Deprecated":"false"},{"Env":"VARIABLE_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"This is used to control caching of all the scoped variables defined in the system.","Example":"","Deprecated":"false"},{"Env":"VARIABLE_EXPRESSION_REGEX","EnvType":"string","EnvValue":"@{{([^}]+)}}","EnvDescription":"Scoped variable expression regex","Example":"","Deprecated":"false"},{"Env":"WEBHOOK_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"If you want to continue using jenkins for CI then please provide this for authentication of 
requests","Example":"","Deprecated":"false"}]},{"Category":"GITOPS","Fields":[{"Env":"ACD_CM","EnvType":"string","EnvValue":"argocd-cm","EnvDescription":"Name of the argocd CM","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"To pass the argocd namespace","Example":"","Deprecated":"false"},{"Env":"ACD_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"Password for the Argocd (deprecated)","Example":"","Deprecated":"false"},{"Env":"ACD_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"User name for argocd","Example":"","Deprecated":"false"},{"Env":"GITOPS_SECRET_NAME","EnvType":"string","EnvValue":"devtron-gitops-secret","EnvDescription":"devtron-gitops-secret","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS","EnvType":"string","EnvValue":"Deployment,Rollout,StatefulSet,ReplicaSet","EnvDescription":"this holds the list of k8s resource names which support replicas key. this list used in hibernate/un hibernate process","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS_BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"this the batch size to control no of above resources can be parsed in one go to determine hibernate status","Example":"","Deprecated":"false"}]},{"Category":"INFRA_SETUP","Fields":[{"Env":"DASHBOARD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Dashboard micro-service URL","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Dashboard micro-service namespace","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_PORT","EnvType":"string","EnvValue":"3000","EnvDescription":"Port for dashboard micro-service","Example":"","Deprecated":"false"},{"Env":"DEX_HOST","EnvType":"string","EnvValue":"http://localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_PORT","EnvType":"string","EnvValue":"5556","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_PROTOCOL","EnvType":"string","EnvValue":"REST","EnvDescription":"Protocol to connect with git-sensor micro-service","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"pick_first\"}","EnvDescription":"git-sensor grpc service config","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Timeout for getting response from the git-sensor","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_URL","EnvType":"string","EnvValue":"127.0.0.1:7070","EnvDescription":"git-sensor micro-service url ","Example":"","Deprecated":"false"},{"Env":"HELM_CLIENT_URL","EnvType":"string","EnvValue":"127.0.0.1:50051","EnvDescription":"Kubelink micro-service url ","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_RECEIVE_MSG_SIZE","EnvType":"int","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_SEND_MSG_SIZE","EnvType":"int","EnvValue":"4","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"round_robin\"}","EnvDescription":"kubelink grpc service config","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"Application 
name","Example":"","Deprecated":"false"},{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"Database for casbin","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"address of postgres service","Example":"postgresql-postgresql.devtroncd","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"postgres database to be made connection with","Example":"orchestrator, casbin, git_sensor, lens","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"{password}","EnvDescription":"password for postgres, associated with PG_USER","Example":"confidential ;)","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"port of postgresql service","Example":"5432","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for read operation in postgres","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"user for postgres","Example":"postgres","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for write operation in postgres","Example":"","Deprecated":"false"}]},{"Category":"RBAC","Fields":[{"Env":"ENFORCER_CACHE","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable enforcer cache.","Example":"","Deprecated":"false"},{"Env":"ENFORCER_CACHE_EXPIRATION_IN_SEC","EnvType":"int","EnvValue":"86400","EnvDescription":"Expiration time (in seconds) for enforcer cache. ","Example":"","Deprecated":"false"},{"Env":"ENFORCER_MAX_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"Maximum batch size for the enforcer.","Example":"","Deprecated":"false"},{"Env":"USE_CASBIN_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable casbin V2 API","Example":"","Deprecated":"false"}]}] \ No newline at end of file diff --git a/env_gen.md b/env_gen.md index a328621194..539bb63e5f 100644 --- a/env_gen.md +++ b/env_gen.md @@ -157,6 +157,11 @@ | CI_TRIGGER_CRON_TIME | int |2 | For image poll plugin | | false | | CI_WORKFLOW_STATUS_UPDATE_CRON | string |*/5 * * * * | Cron schedule for CI pipeline status | | false | | CLI_CMD_TIMEOUT_GLOBAL_SECONDS | int |0 | Used in git cli opeartion timeout | | false | + | CLUSTER_OVERVIEW_BACKGROUND_REFRESH_ENABLED | bool |true | Enable background refresh of cluster overview cache | | false | + | CLUSTER_OVERVIEW_CACHE_ENABLED | bool |true | Enable caching for cluster overview data | | false | + | CLUSTER_OVERVIEW_MAX_PARALLEL_CLUSTERS | int |15 | Maximum number of clusters to fetch in parallel during refresh | | false | + | CLUSTER_OVERVIEW_MAX_STALE_DATA_SECONDS | int |30 | Maximum age of cached data in seconds before warning | | false | + | CLUSTER_OVERVIEW_REFRESH_INTERVAL_SECONDS | int |15 | Background cache refresh interval in seconds | | false | | CLUSTER_STATUS_CRON_TIME | int |15 | Cron schedule for cluster status on resource browser | | false | | CONSUMER_CONFIG_JSON | string | | | | false | | DEFAULT_LOG_TIME_LIMIT | int64 |1 | | | false | diff --git a/go.mod b/go.mod index 64a2a60e28..2ca691a765 100644 --- a/go.mod +++ b/go.mod @@ -122,7 +122,7 @@ require ( github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect + 
github.com/blang/semver/v4 v4.0.0 github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect github.com/bombsimon/logrusr/v2 v2.0.1 // indirect github.com/bradleyfalzon/ghinstallation/v2 v2.12.0 // indirect @@ -338,7 +338,7 @@ require ( replace ( github.com/argoproj/argo-workflows/v3 v3.5.13 => github.com/devtron-labs/argo-workflows/v3 v3.5.13 github.com/cyphar/filepath-securejoin v0.4.1 => github.com/cyphar/filepath-securejoin v0.3.6 // indirect - github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251121075820-d6692a4fd1f2 - github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251121075820-d6692a4fd1f2 + github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251201122208-2efa348401af + github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251201122208-2efa348401af go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 ) diff --git a/go.sum b/go.sum index 23845c1f2f..e07ccd3ee1 100644 --- a/go.sum +++ b/go.sum @@ -237,10 +237,10 @@ github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzq github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devtron-labs/argo-workflows/v3 v3.5.13 h1:3pINq0gXOSeTw2z/vYe+j80lRpSN5Rp/8mfQORh8SmU= github.com/devtron-labs/argo-workflows/v3 v3.5.13/go.mod h1:/vqxcovDPT4zqr4DjR5v7CF8ggpY1l3TSa2CIG3jmjA= -github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251121075820-d6692a4fd1f2 h1:w2tNN1KAxslJOCwwLUBvnvY+W3hQcow6Jwvtc9tl2Ps= -github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251121075820-d6692a4fd1f2/go.mod h1:9LCkYfiWaEKIBkmxw9jX1GujvEMyHwmDtVsatffAkeU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251121075820-d6692a4fd1f2 h1:P2q/ART/+WJi8NT3hlnTvtJtG80ZY5jMbObtE5C4ADc= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251121075820-d6692a4fd1f2/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= +github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251201122208-2efa348401af h1:A2nIGww/F5tq2PwA5FRTxIbT1vBQiaGm5KlpCz2Mr10= +github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251201122208-2efa348401af/go.mod h1:9LCkYfiWaEKIBkmxw9jX1GujvEMyHwmDtVsatffAkeU= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251201122208-2efa348401af h1:7W0Tb31lLKyEeJRgI8F7SAoWmmt2PRbhgeY32RfjDcs= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251201122208-2efa348401af/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= github.com/devtron-labs/go-bitbucket v0.9.60-beta h1:VEx1jvDgdtDPS6A1uUFoaEi0l1/oLhbr+90xOwr6sDU= github.com/devtron-labs/go-bitbucket v0.9.60-beta/go.mod h1:GnuiCesvh8xyHeMCb+twm8lBR/kQzJYSKL28ZfObp1Y= github.com/devtron-labs/protos v0.0.3-0.20250323220609-ecf8a0f7305e h1:U6UdYbW8a7xn5IzFPd8cywjVVPfutGJCudjePAfL/Hs= diff --git a/internal/sql/repository/app/AppRepository.go b/internal/sql/repository/app/AppRepository.go index 93710da779..d237ec8f7a 100644 --- a/internal/sql/repository/app/AppRepository.go +++ b/internal/sql/repository/app/AppRepository.go @@ -18,13 +18,14 @@ package app import ( "fmt" + "time" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/pkg/sql" 
"github.com/devtron-labs/devtron/pkg/team/repository" "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" - "time" ) type App struct { @@ -96,6 +97,11 @@ type AppRepository interface { FindJobCount() (int, error) UpdateAppOfferingModeForAppIds(successAppIds []*int, appOfferingMode string, userId int32) error + + // Overview methods + FindAllChartStoreApps() ([]*App, error) + FindAllActiveDevtronAppsInTimeRange(from, to *time.Time) ([]*App, error) + FindAllActiveChartStoreAppsInTimeRange(from, to *time.Time) ([]*App, error) } const DevtronApp = "DevtronApp" @@ -542,3 +548,52 @@ func (repo AppRepositoryImpl) UpdateAppOfferingModeForAppIds(successAppIds []*in _, err := repo.dbConnection.Query(app, query, appOfferingMode, userId, time.Now(), pg.In(successAppIds)) return err } + +// Overview methods implementation +func (repo AppRepositoryImpl) FindAllChartStoreApps() ([]*App, error) { + var apps []*App + err := repo.dbConnection.Model(&apps).Where("active = ?", true).Where("app_type = ?", helper.ChartStoreApp).Select() + return apps, err +} + +func (repo AppRepositoryImpl) FindAllActiveDevtronAppsInTimeRange(from, to *time.Time) ([]*App, error) { + var apps []*App + query := repo.dbConnection.Model(&apps). + Where("active = ?", true). + Where("app_type = ?", helper.CustomApp) // Only normal CI/CD apps (appType = 0) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + err := query.Select() + if err != nil { + repo.logger.Errorw("error getting apps in time range", "from", from, "to", to, "err", err) + return nil, err + } + return apps, nil +} + +func (repo AppRepositoryImpl) FindAllActiveChartStoreAppsInTimeRange(from, to *time.Time) ([]*App, error) { + var apps []*App + query := repo.dbConnection.Model(&apps). + Where("active = ?", true). 
+ Where("app_type = ?", helper.ChartStoreApp) // Only chart store apps (appType = 1) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + err := query.Select() + if err != nil { + repo.logger.Errorw("error getting chart store apps in time range", "from", from, "to", to, "err", err) + return nil, err + } + return apps, nil +} diff --git a/internal/sql/repository/deploymentConfig/repository.go b/internal/sql/repository/deploymentConfig/repository.go index 578205287e..be987fa8ef 100644 --- a/internal/sql/repository/deploymentConfig/repository.go +++ b/internal/sql/repository/deploymentConfig/repository.go @@ -18,6 +18,7 @@ package deploymentConfig import ( "fmt" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" @@ -66,6 +67,8 @@ type Repository interface { GetAllConfigsForActiveApps() ([]*DeploymentConfig, error) GetAllEnvLevelConfigsWithReleaseMode(releaseMode string) ([]*DeploymentConfig, error) GetDeploymentAppTypeForChartStoreAppByAppId(appId int) (string, error) + // GitOps count methods + GetGitOpsEnabledPipelineCount() (int, error) } type RepositoryImpl struct { @@ -298,3 +301,37 @@ func (impl *RepositoryImpl) GetDeploymentAppTypeForChartStoreAppByAppId(appId in Select() return result.DeploymentAppType, err } + +// GetGitOpsEnabledPipelineCount returns count of GitOps enabled pipelines +// This handles lazy migration from pipeline table to deployment_config table +func (impl *RepositoryImpl) GetGitOpsEnabledPipelineCount() (int, error) { + var count int + // Complex query to handle lazy migration: + // 1. Count pipelines that have deployment_config entry with argo_cd + // 2. Count pipelines that don't have deployment_config entry but have argo_cd in pipeline table + query := ` + SELECT COUNT(DISTINCT p.id) + FROM pipeline p + JOIN environment e ON p.environment_id = e.id + JOIN app a ON p.app_id = a.id + LEFT JOIN deployment_config dc ON dc.app_id = p.app_id + AND dc.environment_id = p.environment_id + AND dc.active = true + WHERE p.deleted = false + AND e.active = true + AND a.active = true + AND ( + -- Case 1: deployment_config exists and is argo_cd + dc.deployment_app_type = 'argo_cd' + OR + -- Case 2: no deployment_config entry, fallback to pipeline table + (dc.id IS NULL AND p.deployment_app_type = 'argo_cd') + ) + ` + + _, err := impl.dbConnection.Query(&count, query) + if err != nil { + return 0, fmt.Errorf("error getting GitOps enabled pipeline count: %w", err) + } + return count, nil +} diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 6b6001a8dd..452e1f336f 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -19,16 +19,19 @@ package pipelineConfig import ( "context" "errors" + "fmt" + "time" + apiBean "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/constants" + bean2 "github.com/devtron-labs/devtron/pkg/overview/bean" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "go.opentelemetry.io/otel" 
"go.uber.org/zap" - "time" ) type CdWorkflowRepository interface { @@ -80,6 +83,12 @@ type CdWorkflowRepository interface { MigrateIsArtifactUploaded(wfrId int, isArtifactUploaded bool) MigrateCdArtifactLocation(wfrId int, cdArtifactLocation string) FindDeployedCdWorkflowRunnersByPipelineId(pipelineId int) ([]*CdWorkflowRunner, error) + + // Overview methods + GetDeploymentCountInTimeRange(from, to *time.Time) (int, error) + GetDeploymentWorkflowsForStatusTrend(from, to *time.Time) ([]DeploymentStatusData, error) + GetBlockedDeploymentsForTrend(from, to *time.Time) ([]BlockedDeploymentData, error) + GetTriggeredCDPipelines(from, to *time.Time, sortOrder bean2.SortOrder, limit, offset int) ([]PipelineUsageData, int, error) } type CdWorkflowRepositoryImpl struct { @@ -182,6 +191,15 @@ type AppDeploymentStatus struct { WfrId int `json:"wfrId,omitempty"` } +type DeploymentStatusData struct { + StartedOn time.Time `db:"started_on"` + Status string `db:"status"` +} + +type BlockedDeploymentData struct { + StartedOn time.Time `db:"started_on"` +} + func NewCdWorkflowRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) *CdWorkflowRepositoryImpl { return &CdWorkflowRepositoryImpl{ dbConnection: dbConnection, @@ -811,3 +829,143 @@ SELECT "app_id","env_id","ci_artifact_id","parent_ci_artifact","scanned" FROM Ra } return runners, nil } + +// Overview methods implementation +func (impl *CdWorkflowRepositoryImpl) GetDeploymentCountInTimeRange(from, to *time.Time) (int, error) { + var count int + query := impl.dbConnection.Model((*CdWorkflowRunner)(nil)). + Where("created_on >= ? AND created_on <= ?", from, to). + Where("workflow_type = ?", "DEPLOY") + + count, err := query.Count() + if err != nil { + impl.logger.Errorw("error getting deployment count in time range", "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +// GetDeploymentWorkflowsForStatusTrend returns all deployment workflows in the date range including deployments of deleted pipelines +func (impl *CdWorkflowRepositoryImpl) GetDeploymentWorkflowsForStatusTrend(from, to *time.Time) ([]DeploymentStatusData, error) { + var deployments []DeploymentStatusData + + query := ` + SELECT cwr.started_on, cwr.status + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cwr.cd_workflow_id = cw.id + INNER JOIN pipeline p ON cw.pipeline_id = p.id + INNER JOIN app a ON p.app_id = a.id + WHERE cwr.started_on >= ? AND cwr.started_on <= ? + AND cwr.workflow_type = 'DEPLOY' + ORDER BY cwr.started_on + ` + + _, err := impl.dbConnection.Query(&deployments, query, from, to) + if err != nil { + impl.logger.Errorw("error fetching deployment workflows for status trend", "from", from, "to", to, "err", err) + return nil, err + } + + return deployments, nil +} + +// GetBlockedDeploymentsForTrend returns all deployment attempts that were blocked by security scan policy +// This includes: +// 1. Deployments blocked BEFORE workflow creation (tracked in resource_filter_evaluation_audit with filter_type=6) +// 2. Deployments that started but failed due to vulnerability (tracked in cd_workflow_runner with message) +func (impl *CdWorkflowRepositoryImpl) GetBlockedDeploymentsForTrend(from, to *time.Time) ([]BlockedDeploymentData, error) { + var blockedDeployments []BlockedDeploymentData + + // Query to get blocked deployments from two sources: + // 1. 
resource_filter_evaluation_audit: Deployments blocked by security scan policy BEFORE workflow creation + // - filter_type = 6 (SECURITY_SCAN_POLICY) + // - subject_type = 0 (Artifact) + // - reference_type = 1 (PipelineStage) + // 2. cd_workflow_runner: Deployments that started but failed due to vulnerability + // - status = 'Failed' + // - message contains 'Found vulnerability on image' + query := ` + SELECT created_on as started_on + FROM resource_filter_evaluation_audit + WHERE created_on >= ? AND created_on <= ? + AND filter_type = 6 + AND subject_type = 0 + AND reference_type = 1 + UNION ALL + SELECT cwr.started_on + FROM cd_workflow_runner cwr + WHERE cwr.started_on >= ? AND cwr.started_on <= ? + AND cwr.workflow_type = 'DEPLOY' + AND cwr.status = 'Failed' + AND cwr.message LIKE '%Found vulnerability on image%' + ORDER BY started_on + ` + + _, err := impl.dbConnection.Query(&blockedDeployments, query, from, to, from, to) + if err != nil { + impl.logger.Errorw("error fetching blocked deployments for trend", "from", from, "to", to, "err", err) + return nil, err + } + + return blockedDeployments, nil +} + +func (impl *CdWorkflowRepositoryImpl) GetTriggeredCDPipelines(from, to *time.Time, sortOrder bean2.SortOrder, limit, offset int) ([]PipelineUsageData, int, error) { + var results []PipelineUsageData + var totalCount int + + // First get the total count + countQuery := ` + SELECT COUNT(DISTINCT p.id) + FROM pipeline p + INNER JOIN app a ON p.app_id = a.id + INNER JOIN environment e ON p.environment_id = e.id + LEFT JOIN cd_workflow cw ON p.id = cw.pipeline_id + LEFT JOIN cd_workflow_runner cwr ON cw.id = cwr.cd_workflow_id + AND cwr.created_on >= ? AND cwr.created_on <= ? + AND cwr.workflow_type = 'DEPLOY' AND a.app_type = 0 + WHERE p.deleted = false AND a.active = true + ` + + _, err := impl.dbConnection.Query(&totalCount, countQuery, from, to) + if err != nil { + impl.logger.Errorw("error getting total count of CD pipelines", "from", from, "to", to, "err", err) + return nil, 0, err + } + + // Build the main query with sorting and pagination + orderClause := "ORDER BY trigger_count DESC, p.id DESC" + if sortOrder == bean2.ASC { + orderClause = "ORDER BY trigger_count ASC, p.id ASC" + } + + query := fmt.Sprintf(` + SELECT + a.id as app_id, + e.id as env_id, + p.id as pipeline_id, + p.pipeline_name as pipeline_name, + a.app_name, + e.environment_name as env_name, + COALESCE(COUNT(cwr.id), 0) as trigger_count + FROM pipeline p + INNER JOIN app a ON p.app_id = a.id + INNER JOIN environment e ON p.environment_id = e.id + LEFT JOIN cd_workflow cw ON p.id = cw.pipeline_id + LEFT JOIN cd_workflow_runner cwr ON cw.id = cwr.cd_workflow_id + AND cwr.created_on >= ? AND cwr.created_on <= ? + AND cwr.workflow_type = 'DEPLOY' AND a.app_type = 0 + WHERE p.deleted = false AND a.active = true + GROUP BY a.id, e.id, p.id, p.pipeline_name, a.app_name, e.environment_name + %s + LIMIT ? OFFSET ? 
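+ -- bind order at query time: from, to (runner join window), then limit and offset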
+ `, orderClause) + + _, err = impl.dbConnection.Query(&results, query, from, to, limit, offset) + if err != nil { + impl.logger.Errorw("error getting triggered CD pipelines", "from", from, "to", to, "sortOrder", sortOrder, "limit", limit, "offset", offset, "err", err) + return nil, 0, err + } + + return results, totalCount, nil +} diff --git a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go index c6adb8b7a8..2920f991e8 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go @@ -19,8 +19,14 @@ package pipelineConfig import ( "context" "fmt" + "strconv" + "time" + "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/ciPipeline" + workflowConstants "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/constants" buildCommonBean "github.com/devtron-labs/devtron/pkg/build/pipeline/bean/common" repository2 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" "github.com/devtron-labs/devtron/pkg/sql" @@ -29,8 +35,6 @@ import ( "github.com/go-pg/pg/orm" "go.opentelemetry.io/otel" "go.uber.org/zap" - "strconv" - "time" ) type CiPipeline struct { @@ -88,6 +92,18 @@ type CiPipelineScript struct { sql.AuditLog } +// WorkflowWithAppEnvDetails represents a workflow with app and environment details for security enablement +type WorkflowWithAppEnvDetails struct { + WorkflowId int `pg:"workflow_id"` + WorkflowName string `pg:"workflow_name"` + AppId int `pg:"app_id"` + AppName string `pg:"app_name"` + Environments string `pg:"environments"` // Comma-separated environment names + CiPipelineIds string `pg:"ci_pipeline_ids"` // Comma-separated CI pipeline IDs (for RBAC) + ScanEnabled bool `pg:"scan_enabled"` + CiPipelineType string `pg:"ci_pipeline_type"` // CI pipeline type (CI_BUILD, LINKED, EXTERNAL, CI_JOB, LINKED_CD) +} + // CiPipelineRepository : // use config.CiPipelineConfigReadService instead of directly using CiPipelineRepository type CiPipelineRepository interface { @@ -146,6 +162,19 @@ type CiPipelineRepository interface { GetLinkedCiPipelines(ctx context.Context, ciPipelineId int) ([]*CiPipeline, error) GetDownStreamInfo(ctx context.Context, sourceCiPipelineId int, appNameMatch, envNameMatch string, req *pagination.RepositoryRequest) ([]ciPipeline.LinkedCIDetails, int, error) + GetActiveCiPipelineCount() (int, error) + GetActiveCiPipelineCountInTimeRange(from, to *time.Time) (int, error) + GetScanEnabledCiPipelineCount() (int, error) + GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd() (int, error) + // Security Enablement methods + FindAppWorkflowsWithScanDetails(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string, sortBy, sortOrder string, offset, size int) ([]*WorkflowWithAppEnvDetails, int, error) + FindAllAppWorkflowIdsByFilters(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string) ([]int, error) + FindAppWorkflowsByIds(workflowIds []int) ([]*WorkflowWithAppEnvDetails, error) + BulkUpdateScanEnabled(workflowIds []int, scanEnabled bool, userId int32) error + + // External CI count methods for performance optimization + GetActiveExternalCiPipelineCount() (int, error) + GetActiveExternalCiPipelineCountInTimeRange(from, 
to *time.Time) (int, error) } type CiPipelineRepositoryImpl struct { @@ -726,3 +755,458 @@ func (impl *CiPipelineRepositoryImpl) GetDownStreamInfo(ctx context.Context, sou } return linkedCIDetails, totalCount, err } + +// Count methods implementation for performance optimization +func (impl *CiPipelineRepositoryImpl) GetActiveCiPipelineCount() (int, error) { + count, err := impl.dbConnection.Model((*CiPipeline)(nil)). + Where("active = ?", true). + Where("deleted = ?", false). + Where("ci_pipeline_type = ?", buildCommonBean.CI_BUILD.ToString()). + Count() + + if err != nil { + impl.logger.Errorw("error getting active CI pipeline count", "err", err) + return 0, err + } + return count, nil +} + +func (impl *CiPipelineRepositoryImpl) GetActiveCiPipelineCountInTimeRange(from, to *time.Time) (int, error) { + query := impl.dbConnection.Model((*CiPipeline)(nil)). + Where("active = ?", true). + Where("deleted = ?", false) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + count, err := query.Count() + if err != nil { + impl.logger.Errorw("error getting active CI pipeline count in time range", "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +func (impl *CiPipelineRepositoryImpl) GetScanEnabledCiPipelineCount() (int, error) { + count, err := impl.dbConnection.Model((*CiPipeline)(nil)). + Where("active = ?", true). + Where("deleted = ?", false). + Where("scan_enabled = ?", true). + Where("ci_pipeline_type = ?", buildCommonBean.CI_BUILD.ToString()). + Count() + + if err != nil { + impl.logger.Errorw("error getting scan enabled CI pipeline count", "err", err) + return 0, err + } + return count, nil +} + +// FindAppWorkflowsWithScanDetails fetches app workflows with scan enablement details for security enablement page +func (impl *CiPipelineRepositoryImpl) FindAppWorkflowsWithScanDetails(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string, sortBy, sortOrder string, offset, size int) ([]*WorkflowWithAppEnvDetails, int, error) { + var results []*WorkflowWithAppEnvDetails + + // Optimized query strategy: + // 1. Single CTE to filter and aggregate in one pass + // 2. Avoid redundant joins by reusing data from first join + // 3. Use window function for total count to avoid separate CTE + // 4. Filter early to reduce rows processed in aggregation + // 5. Dynamic sorting based on sortBy and sortOrder parameters + query := ` + WITH workflow_data AS ( + SELECT + aw.id as workflow_id, + aw.name as workflow_name, + aw.app_id, + a.app_name, + cp.id as ci_pipeline_id, + cp.scan_enabled, + cp.ci_pipeline_type, + e.environment_name + FROM app_workflow aw + INNER JOIN app a ON a.id = aw.app_id AND a.active = true AND a.app_type != ? + INNER JOIN app_workflow_mapping awm ON awm.app_workflow_id = aw.id AND awm.active = true AND awm.type = ? + INNER JOIN ci_pipeline cp ON cp.id = awm.component_id AND cp.active = true AND cp.deleted = false + LEFT JOIN pipeline p ON p.ci_pipeline_id = cp.id AND p.deleted = false + LEFT JOIN environment e ON e.id = p.environment_id + WHERE aw.active = true + ` + + // Initialize query params with Job type constant and CI_PIPELINE constant + queryParams := []interface{}{helper.Job, appWorkflow.CIPIPELINE} + + // Apply app filter early + if len(appIds) > 0 { + query += " AND aw.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Apply search filter early + if searchQuery != "" { + query += " AND aw.name ILIKE ?" 
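+ // ILIKE gives a case-insensitive name match; the wildcard-wrapped search term is bound as a parameter below, keeping the query injection-safe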
+ queryParams = append(queryParams, "%"+searchQuery+"%") + } + + // Apply scan enablement filter early using constants + // Empty string means fetch all workflows + if scanEnablement == string(workflowConstants.ScanEnabled) { + query += " AND cp.scan_enabled = true" + } else if scanEnablement == string(workflowConstants.ScanNotEnabled) { + query += " AND cp.scan_enabled = false" + } + // If scanEnablement is empty or any other value, no filter is applied (fetch all) + + // Apply cluster/env filters early using direct join instead of EXISTS subquery + if len(clusterIds) > 0 || len(envIds) > 0 { + // Add filter condition - at least one pipeline must match + // We'll handle this by ensuring the workflow has matching pipelines + query += ` + AND EXISTS ( + SELECT 1 FROM pipeline p2 + INNER JOIN environment e2 ON e2.id = p2.environment_id + WHERE p2.ci_pipeline_id = cp.id + AND p2.deleted = false + ` + if len(clusterIds) > 0 { + query += " AND e2.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + if len(envIds) > 0 { + query += " AND e2.id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + query += ` + ) + ` + } + + query += ` + ) + SELECT + workflow_id, + workflow_name, + app_id, + app_name, + MAX(scan_enabled::int)::boolean as scan_enabled, + STRING_AGG(DISTINCT environment_name, ', ' ORDER BY environment_name) FILTER (WHERE environment_name IS NOT NULL) as environments, + STRING_AGG(DISTINCT ci_pipeline_id::text, ',' ORDER BY ci_pipeline_id::text) as ci_pipeline_ids, + MAX(ci_pipeline_type) as ci_pipeline_type + FROM workflow_data + GROUP BY workflow_id, workflow_name, app_id, app_name + ` + + // Add dynamic sorting + orderByClause := impl.buildWorkflowSortClause(sortBy, sortOrder) + query += orderByClause + + // Fetch all results (no pagination in SQL) + var allResults []*WorkflowWithAppEnvDetails + _, err := impl.dbConnection.Query(&allResults, query, queryParams...) 
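+ // the query returns the full, unpaginated result set here; pagination is applied in memory below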
+ if err != nil { + impl.logger.Errorw("error fetching workflows with app and env details", "err", err, "query", query, "params", queryParams) + return nil, 0, err + } + + // Calculate total count + totalCount := len(allResults) + + // Apply pagination in code + start := offset + end := offset + size + + // Handle edge cases + if start > totalCount { + start = totalCount + } + if end > totalCount { + end = totalCount + } + if start < 0 { + start = 0 + } + + // Slice the results for pagination + results = allResults[start:end] + + return results, totalCount, nil +} + +// buildWorkflowSortClause builds the ORDER BY clause for workflow listing based on sortBy and sortOrder +func (impl *CiPipelineRepositoryImpl) buildWorkflowSortClause(sortBy, sortOrder string) string { + // Default sorting + orderByClause := " ORDER BY app_name ASC, workflow_name ASC" + + // Validate and sanitize sortOrder using constants + validSortOrder := string(workflowConstants.SortOrderAsc) + if sortOrder == string(workflowConstants.SortOrderDesc) { + validSortOrder = string(workflowConstants.SortOrderDesc) + } + + // Build ORDER BY based on sortBy field using constants + switch sortBy { + case string(workflowConstants.WorkflowSortByWorkflowName): + orderByClause = " ORDER BY workflow_name " + validSortOrder + ", app_name " + validSortOrder + case string(workflowConstants.WorkflowSortByAppName): + orderByClause = " ORDER BY app_name " + validSortOrder + ", workflow_name " + validSortOrder + case string(workflowConstants.WorkflowSortByScanEnabled): + orderByClause = " ORDER BY scan_enabled " + validSortOrder + ", app_name " + validSortOrder + ", workflow_name " + validSortOrder + default: + // Default: sort by app_name, then workflow_name + orderByClause = " ORDER BY app_name " + validSortOrder + ", workflow_name " + validSortOrder + } + + return orderByClause +} + +// FindAllAppWorkflowIdsByFilters fetches ALL app workflow IDs matching the filters (no pagination) +// This is used for bulk operations where we need to get all matching workflow IDs +func (impl *CiPipelineRepositoryImpl) FindAllAppWorkflowIdsByFilters(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string) ([]int, error) { + query := ` + WITH workflow_data AS ( + SELECT + aw.id as workflow_id, + cp.scan_enabled + FROM app_workflow aw + INNER JOIN app a ON a.id = aw.app_id AND a.active = true AND a.app_type != ? + INNER JOIN app_workflow_mapping awm ON awm.app_workflow_id = aw.id AND awm.active = true AND awm.type = ? + INNER JOIN ci_pipeline cp ON cp.id = awm.component_id AND cp.active = true AND cp.deleted = false + LEFT JOIN pipeline p ON p.ci_pipeline_id = cp.id AND p.deleted = false + LEFT JOIN environment e ON e.id = p.environment_id + WHERE aw.active = true + ` + + // Initialize query params with Job type constant and CI_PIPELINE constant + queryParams := []interface{}{helper.Job, appWorkflow.CIPIPELINE} + + // Apply filters + if len(appIds) > 0 { + query += " AND aw.app_id IN (?)" + queryParams = append(queryParams, pg.In(appIds)) + } + + if len(clusterIds) > 0 { + query += " AND e.cluster_id IN (?)" + queryParams = append(queryParams, pg.In(clusterIds)) + } + + if len(envIds) > 0 { + query += " AND e.id IN (?)" + queryParams = append(queryParams, pg.In(envIds)) + } + + if searchQuery != "" { + query += " AND aw.name ILIKE ?" 
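+ // same case-insensitive name filter as FindAppWorkflowsWithScanDetails; the pattern is appended to queryParams below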
+ queryParams = append(queryParams, "%"+searchQuery+"%") + } + + // Apply scan enablement filter using constants + if scanEnablement == string(workflowConstants.ScanEnabled) { + query += " AND cp.scan_enabled = true" + } else if scanEnablement == string(workflowConstants.ScanNotEnabled) { + query += " AND cp.scan_enabled = false" + } + + // Close the CTE and select distinct workflow IDs + query += ` + ) + SELECT DISTINCT workflow_id + FROM workflow_data + ORDER BY workflow_id + ` + + var workflowIds []int + _, err := impl.dbConnection.Query(&workflowIds, query, queryParams...) + if err != nil { + impl.logger.Errorw("error fetching all app workflow IDs by filters", "err", err, "query", query, "params", queryParams) + return nil, err + } + + return workflowIds, nil +} + +// FindAppWorkflowsByIds fetches app workflows by specific workflow IDs (for RBAC checks in bulk operations) +func (impl *CiPipelineRepositoryImpl) FindAppWorkflowsByIds(workflowIds []int) ([]*WorkflowWithAppEnvDetails, error) { + if len(workflowIds) == 0 { + return []*WorkflowWithAppEnvDetails{}, nil + } + + query := ` + WITH workflow_data AS ( + SELECT + aw.id as workflow_id, + aw.name as workflow_name, + aw.app_id, + a.app_name, + cp.id as ci_pipeline_id, + cp.scan_enabled, + cp.ci_pipeline_type, + e.environment_name + FROM app_workflow aw + INNER JOIN app a ON a.id = aw.app_id AND a.active = true AND a.app_type != ? + INNER JOIN app_workflow_mapping awm ON awm.app_workflow_id = aw.id AND awm.active = true AND awm.type = ? + INNER JOIN ci_pipeline cp ON cp.id = awm.component_id AND cp.active = true AND cp.deleted = false + LEFT JOIN pipeline p ON p.ci_pipeline_id = cp.id AND p.deleted = false + LEFT JOIN environment e ON e.id = p.environment_id + WHERE aw.active = true + AND aw.id = ANY(?) + ) + SELECT + workflow_id, + workflow_name, + app_id, + app_name, + MAX(scan_enabled::int)::boolean as scan_enabled, + STRING_AGG(DISTINCT environment_name, ', ' ORDER BY environment_name) FILTER (WHERE environment_name IS NOT NULL) as environments, + STRING_AGG(DISTINCT ci_pipeline_id::text, ',' ORDER BY ci_pipeline_id::text) as ci_pipeline_ids, + MAX(ci_pipeline_type) as ci_pipeline_type + FROM workflow_data + GROUP BY workflow_id, workflow_name, app_id, app_name + ORDER BY workflow_name ASC + ` + + var results []*WorkflowWithAppEnvDetails + _, err := impl.dbConnection.Query(&results, query, helper.Job, appWorkflow.CIPIPELINE, pg.Array(workflowIds)) + if err != nil { + impl.logger.Errorw("error fetching app workflows by IDs", "err", err, "workflowIds", workflowIds) + return nil, err + } + + return results, nil +} + +// BulkUpdateScanEnabled updates scan_enabled for CI pipelines in app workflows +// workflowIds are app_workflow.id values, we fetch ci_pipeline_ids from app_workflow_mapping and update ci_pipeline +func (impl *CiPipelineRepositoryImpl) BulkUpdateScanEnabled(workflowIds []int, scanEnabled bool, userId int32) error { + if len(workflowIds) == 0 { + return nil + } + + // Get distinct ci_pipeline_ids from app_workflow_mapping table for the given app workflow IDs + query := ` + SELECT DISTINCT awm.component_id + FROM app_workflow_mapping awm + WHERE awm.app_workflow_id IN (?) + AND awm.type = ? 
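+		-- component_id holds the ci_pipeline id for mappings of type CI_PIPELINE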
+ AND awm.active = true + ` + + var ciPipelineIds []int + _, err := impl.dbConnection.Query(&ciPipelineIds, query, pg.In(workflowIds), appWorkflow.CIPIPELINE) + + if err != nil { + impl.logger.Errorw("error fetching ci_pipeline_ids from app_workflow_mapping", "workflowIds", workflowIds, "err", err) + return err + } + + if len(ciPipelineIds) == 0 { + impl.logger.Warnw("no ci_pipeline_ids found for given app_workflow_ids", "workflowIds", workflowIds) + return fmt.Errorf("no ci_pipeline_ids found for given app_workflow_ids") + } + + // Update scan_enabled for the fetched ci_pipeline_ids + _, err = impl.dbConnection.Model(&CiPipeline{}). + Set("scan_enabled = ?", scanEnabled). + Set("updated_on = ?", time.Now()). + Set("updated_by = ?", userId). + Where("id IN (?)", pg.In(ciPipelineIds)). + Where("active = ?", true). + Where("deleted = ?", false). + Update() + + if err != nil { + impl.logger.Errorw("error bulk updating scan_enabled", "ciPipelineIds", ciPipelineIds, "scanEnabled", scanEnabled, "err", err) + return err + } + + impl.logger.Infow("successfully updated scan_enabled for ci_pipelines", "workflowIds", workflowIds, "ciPipelineIds", ciPipelineIds, "scanEnabled", scanEnabled) + return nil +} + +// External CI count methods implementation +func (impl *CiPipelineRepositoryImpl) GetActiveExternalCiPipelineCount() (int, error) { + count, err := impl.dbConnection.Model((*ExternalCiPipeline)(nil)). + Where("active = ?", true). + Count() + + if err != nil { + impl.logger.Errorw("error getting active external CI pipeline count", "err", err) + return 0, err + } + return count, nil +} + +func (impl *CiPipelineRepositoryImpl) GetActiveExternalCiPipelineCountInTimeRange(from, to *time.Time) (int, error) { + query := impl.dbConnection.Model((*ExternalCiPipeline)(nil)). + Where("active = ?", true) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + count, err := query.Count() + if err != nil { + impl.logger.Errorw("error getting active external CI pipeline count in time range", "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +// GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd returns count of active CI pipelines that have IMAGE SCAN plugin configured +// in POST-CI or PRE-CD stages. Image scanning plugin is identified by plugin name 'IMAGE SCAN' (constant IMAGE_SCANNING_PLUGIN from pipelineStage.go) +func (impl *CiPipelineRepositoryImpl) GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd() (int, error) { + var count int + + // Query to count distinct CI pipelines that have IMAGE SCAN plugin configured in POST-CI or PRE-CD stages + // We need to check both: + // 1. POST-CI stages (pipeline_stage.ci_pipeline_id is set) + // 2. 
PRE-CD stages (pipeline_stage.cd_pipeline_id is set, need to join with pipeline table to get ci_pipeline_id)
+	query := `
+		SELECT COUNT(DISTINCT ci_pipeline_id) FROM (
+			-- CI pipelines with IMAGE SCAN plugin in POST-CI stage
+			SELECT cp.id as ci_pipeline_id
+			FROM ci_pipeline cp
+			INNER JOIN pipeline_stage ps ON ps.ci_pipeline_id = cp.id
+			INNER JOIN pipeline_stage_step pss ON pss.pipeline_stage_id = ps.id
+			INNER JOIN plugin_metadata pm ON pm.id = pss.ref_plugin_id
+			WHERE cp.active = true
+			AND cp.deleted = false
+			AND ps.deleted = false
+			AND pss.deleted = false
+			AND pm.deleted = false
+			AND pm.name = 'IMAGE SCAN'
+
+			UNION
+
+			-- CI pipelines with IMAGE SCAN plugin in PRE-CD stage
+			SELECT p.ci_pipeline_id
+			FROM pipeline p
+			INNER JOIN pipeline_stage ps ON ps.cd_pipeline_id = p.id
+			INNER JOIN pipeline_stage_step pss ON pss.pipeline_stage_id = ps.id
+			INNER JOIN plugin_metadata pm ON pm.id = pss.ref_plugin_id
+			INNER JOIN ci_pipeline cp ON cp.id = p.ci_pipeline_id
+			WHERE p.deleted = false
+			AND cp.active = true
+			AND cp.deleted = false
+			AND ps.deleted = false
+			AND pss.deleted = false
+			AND pm.deleted = false
+			AND pm.name = 'IMAGE SCAN'
+			AND p.ci_pipeline_id IS NOT NULL
+		) AS combined_pipelines
+	`
+
+	_, err := impl.dbConnection.Query(&count, query)
+	if err != nil {
+		impl.logger.Errorw("error getting CI pipeline count with image scanning plugin", "err", err)
+		return 0, err
+	}
+
+	return count, nil
+}
diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go
index ef43684d21..2e9c527010 100644
--- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go
+++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go
@@ -17,11 +17,13 @@ package pipelineConfig
 import (
+	"fmt"
 	"time"
 
 	"github.com/devtron-labs/devtron/internal/sql/constants"
 	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow"
 	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow"
+	"github.com/devtron-labs/devtron/pkg/overview/bean"
 	"github.com/go-pg/pg"
 	"go.uber.org/zap"
 )
@@ -53,6 +55,49 @@ type CiWorkflowRepository interface {
 	MigrateIsArtifactUploaded(wfId int, isArtifactUploaded bool)
 	MigrateCiArtifactLocation(wfId int, artifactLocation string)
+
+	// Overview methods
+	GetTriggeredCIPipelines(from, to *time.Time, sortOrder bean.SortOrder, limit, offset int) ([]PipelineUsageData, int, error)
+	GetCiBuildCountInTimeRange(from, to *time.Time) (int, error)
+	GetSuccessfulCIBuildsForBuildTime(from, to *time.Time) ([]WorkflowBuildTime, error)
+	GetCIBuildsForStatusTrend(from, to *time.Time) ([]WorkflowStatusData, error)
+}
+
+// Data structures for overview queries
+type PipelineUsageData struct {
+	AppID        int    `json:"appId"`           // Required for both CI and CD pipelines
+	EnvID        int    `json:"envId,omitempty"` // Only for deployment pipelines
+	PipelineID   int    `json:"pipelineId"`
+	PipelineName string `json:"pipelineName"`
+	AppName      string `json:"appName"`
+	EnvName      string `json:"envName,omitempty"` // Only for deployment pipelines
+	TriggerCount int    `json:"triggerCount"`
+}
+
+type ActivityData struct {
+	ID             int       `json:"id"`
+	Type           string    `json:"type"`
+	AppName        string    `json:"appName"`
+	EnvName        string    `json:"envName,omitempty"`
+	CiPipelineName string    `json:"ciPipelineName,omitempty"`
+	Status         string    `json:"status"`
+	TriggeredAt    time.Time `json:"triggeredAt"`
+	TriggeredBy    int       `json:"triggeredBy"`
+}
+
+type WorkflowDetails struct {
+	Name      string
`json:"name"` + CreatedOn time.Time `json:"createdOn"` +} + +type WorkflowBuildTime struct { + StartedOn time.Time `db:"started_on"` + FinishedOn time.Time `db:"finished_on"` +} + +type WorkflowStatusData struct { + StartedOn time.Time `db:"started_on"` + Status string `db:"status"` } type CiWorkflowRepositoryImpl struct { @@ -423,3 +468,123 @@ func (impl *CiWorkflowRepositoryImpl) MigrateCiArtifactLocation(wfId int, artifa impl.logger.Errorw("error occurred while updating ci_artifact_location", "wfId", wfId, "err", err) } } + +// Overview methods implementation +func (impl *CiWorkflowRepositoryImpl) GetTriggeredCIPipelines(from, to *time.Time, sortOrder bean.SortOrder, limit, offset int) ([]PipelineUsageData, int, error) { + var results []PipelineUsageData + var totalCount int + + // First get the total count + countQuery := ` + SELECT COUNT(DISTINCT cp.id) + FROM ci_pipeline cp + INNER JOIN app a ON cp.app_id = a.id + LEFT JOIN ci_workflow cw ON cp.id = cw.ci_pipeline_id + AND cw.started_on >= ? AND cw.started_on <= ? + WHERE cp.deleted = false AND a.app_type = 0 AND a.active = true + ` + + _, err := impl.dbConnection.Query(&totalCount, countQuery, from, to) + if err != nil { + impl.logger.Errorw("error getting total count of CI pipelines", "from", from, "to", to, "err", err) + return nil, 0, err + } + + // Build the main query with sorting and pagination + orderClause := "ORDER BY trigger_count DESC, cp.id DESC" + if sortOrder == bean.ASC { + orderClause = "ORDER BY trigger_count ASC, cp.id ASC" + } + + query := fmt.Sprintf(` + SELECT + a.id as app_id, + cp.id as pipeline_id, + cp.name as pipeline_name, + a.app_name, + COALESCE(COUNT(cw.id), 0) as trigger_count + FROM ci_pipeline cp + INNER JOIN app a ON cp.app_id = a.id + LEFT JOIN ci_workflow cw ON cp.id = cw.ci_pipeline_id + AND cw.started_on >= ? AND cw.started_on <= ? + WHERE cp.deleted = false AND a.app_type = 0 AND a.active = true + GROUP BY a.id, cp.id, cp.name, a.app_name + %s + LIMIT ? OFFSET ? + `, orderClause) + + _, err = impl.dbConnection.Query(&results, query, from, to, limit, offset) + if err != nil { + impl.logger.Errorw("error getting triggered CI pipelines", "from", from, "to", to, "sortOrder", sortOrder, "limit", limit, "offset", offset, "err", err) + return nil, 0, err + } + + return results, totalCount, nil +} + +func (impl *CiWorkflowRepositoryImpl) GetCiBuildCountInTimeRange(from, to *time.Time) (int, error) { + var count int + query := ` + SELECT COUNT(*) + FROM ci_workflow cw + INNER JOIN ci_pipeline cp ON cw.ci_pipeline_id = cp.id + INNER JOIN app a ON cp.app_id = a.id + WHERE cw.started_on >= ? AND cw.started_on <= ? + AND cp.ci_pipeline_type = 'CI_BUILD' + ` + + _, err := impl.dbConnection.Query(&count, query, from, to) + if err != nil { + impl.logger.Errorw("error getting CI_BUILD pipeline count in time range", "from", from, "to", to, "err", err) + return 0, err + } + + return count, nil +} + +func (impl *CiWorkflowRepositoryImpl) GetSuccessfulCIBuildsForBuildTime(from, to *time.Time) ([]WorkflowBuildTime, error) { + var workflows []WorkflowBuildTime + + query := ` + SELECT cw.started_on, cw.finished_on + FROM ci_workflow cw + INNER JOIN ci_pipeline cp ON cw.ci_pipeline_id = cp.id + INNER JOIN app a ON cp.app_id = a.id + WHERE cw.started_on >= ? AND cw.started_on <= ? 
+ AND cw.finished_on IS NOT NULL + AND cw.status = 'Succeeded' + AND cp.ci_pipeline_type = 'CI_BUILD' + ORDER BY cw.started_on + ` + + _, err := impl.dbConnection.Query(&workflows, query, from, to) + if err != nil { + impl.logger.Errorw("error fetching successful CI builds for build time", "from", from, "to", to, "err", err) + return nil, err + } + + return workflows, nil +} + +// GetCIBuildsForStatusTrend returns all CI builds in the date range including builds of deleted pipelines +func (impl *CiWorkflowRepositoryImpl) GetCIBuildsForStatusTrend(from, to *time.Time) ([]WorkflowStatusData, error) { + var workflows []WorkflowStatusData + + query := ` + SELECT cw.started_on, cw.status + FROM ci_workflow cw + INNER JOIN ci_pipeline cp ON cw.ci_pipeline_id = cp.id + INNER JOIN app a ON cp.app_id = a.id + WHERE cw.started_on >= ? AND cw.started_on <= ? + AND cp.ci_pipeline_type = 'CI_BUILD' + ORDER BY cw.started_on + ` + + _, err := impl.dbConnection.Query(&workflows, query, from, to) + if err != nil { + impl.logger.Errorw("error fetching CI builds for status trend", "from", from, "to", to, "err", err) + return nil, err + } + + return workflows, nil +} diff --git a/internal/sql/repository/pipelineConfig/PipelineRepository.go b/internal/sql/repository/pipelineConfig/PipelineRepository.go index 0bd629a0e0..a89c23b007 100644 --- a/internal/sql/repository/pipelineConfig/PipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/PipelineRepository.go @@ -81,6 +81,12 @@ type Pipeline struct { sql.AuditLog } +// PipelineWithAppData represents production pipeline data with app information +type PipelineWithAppData struct { + AppId int `sql:"app_id"` + EnvironmentId int `sql:"environment_id"` +} + type PipelineRepository interface { Save(pipeline []*Pipeline, tx *pg.Tx) error Update(pipeline *Pipeline, tx *pg.Tx) error @@ -147,6 +153,15 @@ type PipelineRepository interface { GetAllArgoAppInfoByDeploymentAppNames(deploymentAppNames []string) ([]*PipelineDeploymentConfigObj, error) FindEnvIdsByIdsInIncludingDeleted(ids []int) ([]int, error) GetPipelineCountByDeploymentType(deploymentType string) (int, error) + + // Overview methods + FindActiveByCiPipelineIdsIn(ciPipelineIds []int) ([]*Pipeline, error) + GetPipelineCountByEnvironmentType(isProd bool) (int, error) + FindActiveByEnvironmentType(isProd bool) ([]*Pipeline, error) + // Count methods for performance optimization + GetActivePipelineCountByEnvironmentTypeInTimeRange(isProd bool, from, to *time.Time) (int, error) + // FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange returns production pipelines with app data that have deployment history within the specified time range + FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange(from, to *time.Time) ([]*PipelineWithAppData, error) } type CiArtifactDTO struct { @@ -986,3 +1001,100 @@ func (impl *PipelineRepositoryImpl) GetPipelineCountByDeploymentType(deploymentT } return count, nil } + +func (impl *PipelineRepositoryImpl) FindActiveByCiPipelineIdsIn(ciPipelineIds []int) ([]*Pipeline, error) { + var pipelines []*Pipeline + err := impl.dbConnection.Model(&pipelines). + Where("ci_pipeline_id in (?)", pg.In(ciPipelineIds)). + Where("deleted = ?", false). 
+ Select() + return pipelines, err +} + +func (impl *PipelineRepositoryImpl) GetPipelineCountByEnvironmentType(isProd bool) (int, error) { + var count int + query := `SELECT COUNT(*) FROM pipeline p + JOIN environment e ON p.environment_id = e.id + WHERE p.deleted = false AND e.active = true AND e.default = ?` + + _, err := impl.dbConnection.Query(&count, query, isProd) + if err != nil { + impl.logger.Errorw("error getting pipeline count by environment type", "isProd", isProd, "err", err) + return 0, err + } + return count, nil +} + +func (impl *PipelineRepositoryImpl) FindActiveByEnvironmentType(isProd bool) ([]*Pipeline, error) { + var pipelines []*Pipeline + err := impl.dbConnection.Model(&pipelines). + Join("JOIN environment e ON pipeline.environment_id = e.id"). + Where("pipeline.deleted = ?", false). + Where("e.active = ?", true). + Where("e.default = ?", isProd). + Select() + + if err != nil { + impl.logger.Errorw("error finding active pipelines by environment type", "isProd", isProd, "err", err) + return nil, err + } + return pipelines, nil +} + +// Count methods implementation for performance optimization +func (impl *PipelineRepositoryImpl) GetActivePipelineCountByEnvironmentTypeInTimeRange(isProd bool, from, to *time.Time) (int, error) { + query := `SELECT COUNT(*) FROM pipeline p + JOIN environment e ON p.environment_id = e.id + WHERE p.deleted = false AND e.active = true AND e.default = ?` + + args := []interface{}{isProd} + + if from != nil { + query += " AND p.created_on >= ?" + args = append(args, from) + } + if to != nil { + query += " AND p.created_on <= ?" + args = append(args, to) + } + + var count int + _, err := impl.dbConnection.Query(&count, query, args...) + if err != nil { + impl.logger.Errorw("error getting active pipeline count by environment type in time range", "isProd", isProd, "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +// FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange returns production pipelines with app data that have deployment history within the specified time range +// This optimized method combines multiple queries into one by joining pipeline, environment, app, and cd_workflow tables with time filtering +func (impl *PipelineRepositoryImpl) FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange(from, to *time.Time) ([]*PipelineWithAppData, error) { + var results []*PipelineWithAppData + + query := ` + SELECT DISTINCT + p.app_id, + p.environment_id + FROM pipeline p + INNER JOIN environment e ON p.environment_id = e.id + INNER JOIN app a ON p.app_id = a.id + INNER JOIN cd_workflow cw ON p.id = cw.pipeline_id + INNER JOIN cd_workflow_runner cwr ON cw.id = cwr.cd_workflow_id + WHERE p.deleted = false + AND e.active = true + AND e.default = true + AND a.active = true + AND cwr.workflow_type = 'DEPLOY' + AND cwr.started_on >= ? + AND cwr.started_on <= ? + ` + + _, err := impl.dbConnection.Query(&results, query, from, to) + if err != nil { + impl.logger.Errorw("error finding production pipelines with app data and deployment history in time range", "from", from, "to", to, "err", err) + return nil, err + } + + return results, nil +} diff --git a/internal/sql/repository/pipelineConfig/bean/constants/WorkflowConstants.go b/internal/sql/repository/pipelineConfig/bean/constants/WorkflowConstants.go new file mode 100644 index 0000000000..a10c795b57 --- /dev/null +++ b/internal/sql/repository/pipelineConfig/bean/constants/WorkflowConstants.go @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package constants + +// ScanEnablementType represents scan enablement filter options +type ScanEnablementType string + +const ( + ScanEnabled ScanEnablementType = "scanEnabled" // Workflows with scanning enabled + ScanNotEnabled ScanEnablementType = "scanNotEnabled" // Workflows with scanning disabled +) + +// WorkflowSortBy represents sort field for workflow listing +type WorkflowSortBy string + +const ( + WorkflowSortByWorkflowName WorkflowSortBy = "workflowName" // Sort by workflow name + WorkflowSortByAppName WorkflowSortBy = "appName" // Sort by application name + WorkflowSortByScanEnabled WorkflowSortBy = "scanEnabled" // Sort by scan enabled status +) + +// SortOrder represents sort order +type SortOrder string + +const ( + SortOrderAsc SortOrder = "ASC" + SortOrderDesc SortOrder = "DESC" +) diff --git a/pkg/asyncProvider/WorkerPoolWrapper.go b/pkg/asyncProvider/WorkerPoolWrapper.go new file mode 100644 index 0000000000..a564606355 --- /dev/null +++ b/pkg/asyncProvider/WorkerPoolWrapper.go @@ -0,0 +1,11 @@ +package asyncProvider + +import ( + "github.com/devtron-labs/common-lib/constants" + "github.com/devtron-labs/common-lib/workerPool" + "go.uber.org/zap" +) + +func NewBatchWorker[T any](batchSize int, logger *zap.SugaredLogger) *workerPool.WorkerPool[T] { + return workerPool.NewWorkerPool[T](batchSize, constants.Orchestrator, logger) +} diff --git a/pkg/cluster/ClusterService.go b/pkg/cluster/ClusterService.go index b7c4807c8e..42cbd360c4 100644 --- a/pkg/cluster/ClusterService.go +++ b/pkg/cluster/ClusterService.go @@ -92,6 +92,7 @@ type ClusterService interface { ConvertClusterBeanObjectToCluster(bean *bean.ClusterBean) *v1alpha1.Cluster GetClusterConfigByClusterId(clusterId int) (*k8s.ClusterConfig, error) + FindActiveClustersExcludingVirtual() ([]bean.ClusterBean, error) } type ClusterServiceImpl struct { @@ -1090,3 +1091,16 @@ func (impl *ClusterServiceImpl) GetClusterConfigByClusterId(clusterId int) (*k8s clusterConfig := rq.GetClusterConfig() return clusterConfig, nil } + +func (impl *ClusterServiceImpl) FindActiveClustersExcludingVirtual() ([]bean.ClusterBean, error) { + models, err := impl.clusterRepository.FindAllActiveExceptVirtual() + if err != nil { + return nil, err + } + var beans []bean.ClusterBean + for _, model := range models { + bean := adapter.GetClusterBean(model) + beans = append(beans, bean) + } + return beans, nil +} diff --git a/pkg/cluster/environment/EnvironmentService.go b/pkg/cluster/environment/EnvironmentService.go index 8ad9077537..c7c0e5a8db 100644 --- a/pkg/cluster/environment/EnvironmentService.go +++ b/pkg/cluster/environment/EnvironmentService.go @@ -70,6 +70,7 @@ type EnvironmentService interface { GetCombinedEnvironmentListForDropDownByClusterIds(token string, clusterIds []int, auth func(token string, object string) bool) ([]*bean2.ClusterEnvDto, error) HandleErrorInClusterConnections(clusters []*bean4.ClusterBean, respMap *sync.Map, clusterExistInDb bool) GetDetailsById(envId int) (*repository.Environment, error) + FindNamesByIds(envIds []int) (map[int]string, error) } type EnvironmentServiceImpl struct { @@ -752,3 +753,7 @@ func (impl EnvironmentServiceImpl) GetDetailsById(envId int) (*repository.Enviro } return envDetails, nil } + +func (impl EnvironmentServiceImpl) FindNamesByIds(envIds []int) (map[int]string, error) { + return impl.environmentRepository.FindNamesByIds(envIds) +} diff --git a/pkg/cluster/environment/repository/EnvironmentRepository.go b/pkg/cluster/environment/repository/EnvironmentRepository.go index 
b573cf6f2f..1a479a9dc4 100644 --- a/pkg/cluster/environment/repository/EnvironmentRepository.go +++ b/pkg/cluster/environment/repository/EnvironmentRepository.go @@ -18,9 +18,13 @@ package repository import ( "fmt" + "time" + "github.com/devtron-labs/devtron/internal/sql/repository/appStatus" "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/pkg/cluster/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "github.com/go-pg/pg/orm" @@ -61,6 +65,8 @@ type EnvironmentRepository interface { Create(mappings *Environment) error FindAll() ([]Environment, error) FindAllActive() ([]*Environment, error) + FindAllActiveInTimeRange(from, to *time.Time) ([]*Environment, error) + GetAggregatedEnvironmentTrendWithParams(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.TimeDataPoint, error) FindAllActiveEnvironmentCount() (int, error) MarkEnvironmentDeleted(mappings *Environment, tx *pg.Tx) error GetConnection() (dbConnection *pg.DB) @@ -80,6 +86,7 @@ type EnvironmentRepository interface { FindByClusterIdAndNamespace(namespaceClusterPair []*ClusterNamespacePair) ([]*Environment, error) FindByClusterIds(clusterIds []int) ([]*Environment, error) FindIdsByNames(envNames []string) ([]int, error) + FindNamesByIds(envIds []int) (map[int]string, error) FindByNames(envNames []string) ([]*Environment, error) FindByEnvName(envName string) ([]*Environment, error) @@ -361,6 +368,29 @@ func (repo EnvironmentRepositoryImpl) FindIdsByNames(envNames []string) ([]int, return ids, err } +// FindNamesByIds returns a map of environment id to environment name for the given ids +func (repo *EnvironmentRepositoryImpl) FindNamesByIds(envIds []int) (map[int]string, error) { + if len(envIds) == 0 { + return make(map[int]string), nil + } + type EnvIdName struct { + Id int `sql:"id"` + Name string `sql:"environment_name"` + } + var envIdNames []EnvIdName + query := "SELECT id, environment_name FROM environment WHERE id IN (?) AND active = ?;" + _, err := repo.dbConnection.Query(&envIdNames, query, pg.In(envIds), true) + if err != nil { + return nil, err + } + + envMap := make(map[int]string, len(envIdNames)) + for _, env := range envIdNames { + envMap[env.Id] = env.Name + } + return envMap, nil +} + func (repo EnvironmentRepositoryImpl) FindByNames(envNames []string) ([]*Environment, error) { var environment []*Environment err := repo.dbConnection. @@ -436,3 +466,70 @@ func (repositoryImpl EnvironmentRepositoryImpl) FindEnvLinkedWithCiPipelines(ext //" INNER JOIN " + //" (SELECT apf2.app_workflow_id FROM app_workflow_mapping apf2 WHERE component_id IN (?) AND type='CI_PIPELINE') sqt " + //" ON apf.app_workflow_id = sqt.app_workflow_id;" + +func (repo EnvironmentRepositoryImpl) FindAllActiveInTimeRange(from, to *time.Time) ([]*Environment, error) { + var mappings []*Environment + query := repo. + dbConnection.Model(&mappings). 
+ Where("environment.active = ?", true) + + if from != nil { + query = query.Where("environment.created_on >= ?", from) + } + if to != nil { + query = query.Where("environment.created_on <= ?", to) + } + + err := query.Select() + return mappings, err +} + +func (repo EnvironmentRepositoryImpl) GetAggregatedEnvironmentTrendWithParams(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.TimeDataPoint, error) { + var results []struct { + Date string `json:"date"` + Count int `json:"count"` + } + + var query string + if aggregationType == constants.AggregateByHour { + // Aggregate by hour for "Today" + query = ` + SELECT + TO_CHAR(DATE_TRUNC('hour', created_on), 'YYYY-MM-DD HH24:00') as date, + COUNT(*) as count + FROM environment + WHERE active = true + AND created_on >= ? AND created_on <= ? + GROUP BY DATE_TRUNC('hour', created_on) + ORDER BY DATE_TRUNC('hour', created_on) + ` + } else { + // Aggregate by day for other periods + query = ` + SELECT + TO_CHAR(DATE_TRUNC('day', created_on), 'YYYY-MM-DD') as date, + COUNT(*) as count + FROM environment + WHERE active = true + AND created_on >= ? AND created_on <= ? + GROUP BY DATE_TRUNC('day', created_on) + ORDER BY DATE_TRUNC('day', created_on) + ` + } + + _, err := repo.dbConnection.Query(&results, query, from, to) + if err != nil { + return nil, err + } + + // Convert to TimeDataPoint + trendData := make([]bean.TimeDataPoint, 0, len(results)) + for _, result := range results { + trendData = append(trendData, bean.TimeDataPoint{ + Date: result.Date, + Count: result.Count, + }) + } + + return trendData, nil +} diff --git a/pkg/overview/AppManagementService.go b/pkg/overview/AppManagementService.go new file mode 100644 index 0000000000..6bc2c7bece --- /dev/null +++ b/pkg/overview/AppManagementService.go @@ -0,0 +1,906 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package overview + +import ( + "context" + "fmt" + "math" + "strconv" + "time" + + "github.com/devtron-labs/devtron/internal/sql/repository/app" + deploymentConfigRepo "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + workflowStageRepository "github.com/devtron-labs/devtron/pkg/pipeline/workflowStatus/repository" + teamRepository "github.com/devtron-labs/devtron/pkg/team/repository" + "go.uber.org/zap" +) + +type AppManagementService interface { + GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error) + GetWorkflowOverview(ctx context.Context) (*bean.WorkflowOverviewResponse, error) + GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error) + GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) (*bean.BuildDeploymentActivityDetailedResponse, error) +} + +type AppManagementServiceImpl struct { + logger *zap.SugaredLogger + appRepository app.AppRepository + pipelineRepository pipelineConfig.PipelineRepository + ciPipelineRepository pipelineConfig.CiPipelineRepository + ciWorkflowRepository pipelineConfig.CiWorkflowRepository + cdWorkflowRepository pipelineConfig.CdWorkflowRepository + environmentRepository repository.EnvironmentRepository + teamRepository teamRepository.TeamRepository + workflowStageRepository workflowStageRepository.WorkflowStageRepository + deploymentConfigRepository deploymentConfigRepo.Repository +} + +func NewAppManagementServiceImpl( + logger *zap.SugaredLogger, + appRepository app.AppRepository, + pipelineRepository pipelineConfig.PipelineRepository, + ciPipelineRepository pipelineConfig.CiPipelineRepository, + ciWorkflowRepository pipelineConfig.CiWorkflowRepository, + cdWorkflowRepository pipelineConfig.CdWorkflowRepository, + environmentRepository repository.EnvironmentRepository, + teamRepository teamRepository.TeamRepository, + workflowStageRepository workflowStageRepository.WorkflowStageRepository, + deploymentConfigRepository deploymentConfigRepo.Repository, +) *AppManagementServiceImpl { + return &AppManagementServiceImpl{ + logger: logger, + appRepository: appRepository, + pipelineRepository: pipelineRepository, + ciPipelineRepository: ciPipelineRepository, + ciWorkflowRepository: ciWorkflowRepository, + cdWorkflowRepository: cdWorkflowRepository, + environmentRepository: environmentRepository, + teamRepository: teamRepository, + workflowStageRepository: workflowStageRepository, + deploymentConfigRepository: deploymentConfigRepository, + } +} + +func (impl *AppManagementServiceImpl) GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error) { + + allProjects, err := impl.teamRepository.FindAllActive() + if err != nil { + impl.logger.Errorw("error getting all projects", "err", err) + return nil, err + } + + allDevtronApps, err := impl.appRepository.FindAll() + if err != nil { + impl.logger.Errorw("error getting all devtron apps", "err", err) + return nil, err + } + + allHelmApps, err := impl.appRepository.FindAllChartStoreApps() + if err != nil { + impl.logger.Errorw("error getting all helm apps", "err", err) + return nil, err + } + + allEnvironments, err := impl.environmentRepository.FindAllActive() + 
if err != nil { + impl.logger.Errorw("error getting all environments", "err", err) + return nil, err + } + + response := &bean.AppsOverviewResponse{ + Projects: &bean.AtAGlanceMetric{ + Total: len(allProjects), + }, + YourApplications: &bean.AtAGlanceMetric{ + Total: len(allDevtronApps), + }, + HelmApplications: &bean.AtAGlanceMetric{ + Total: len(allHelmApps), + }, + Environments: &bean.AtAGlanceMetric{ + Total: len(allEnvironments), + }, + } + + return response, nil +} + +func (impl *AppManagementServiceImpl) GetWorkflowOverview(ctx context.Context) (*bean.WorkflowOverviewResponse, error) { + allTimeMetrics, err := impl.fetchAllWorkflowMetrics(ctx) + if err != nil { + return nil, err + } + + response := impl.buildWorkflowOverviewResponse(allTimeMetrics) + return response, nil +} + +func (impl *AppManagementServiceImpl) GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error) { + impl.logger.Infow("getting build deployment activity overview", "request", request) + + // Get current period counts - now tracking only CI_BUILD pipeline builds (including failed ones) + currentTotalBuilds, err := impl.ciWorkflowRepository.GetCiBuildCountInTimeRange(request.From, request.To) + if err != nil { + impl.logger.Errorw("error getting current total builds count", "err", err) + return nil, err + } + + // Get average build time (only for CI_BUILD pipelines) + avgBuildTime, err := impl.calculateAverageBuildTime(request.From, request.To) + if err != nil { + impl.logger.Errorw("error calculating average build time", "err", err) + // Don't fail the request, just set to 0 + avgBuildTime = 0 + } + + // Get current total deployments count - now tracking ALL triggered deployments (including failed ones) + currentTotalDeployments, err := impl.cdWorkflowRepository.GetDeploymentCountInTimeRange(request.From, request.To) + if err != nil { + impl.logger.Errorw("error getting current total deployments count", "err", err) + return nil, err + } + + response := &bean.BuildDeploymentActivityResponse{ + TotalBuildTriggers: currentTotalBuilds, + AverageBuildTime: avgBuildTime, + TotalDeploymentTriggers: currentTotalDeployments, + } + + return response, nil +} + +func (impl *AppManagementServiceImpl) GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) (*bean.BuildDeploymentActivityDetailedResponse, error) { + impl.logger.Infow("getting build deployment activity detailed", "request", request) + + response := &bean.BuildDeploymentActivityDetailedResponse{ + ActivityKind: request.ActivityKind, + AggregationType: request.AggregationType, + } + + // Based on activityKind, fetch only the requested data + switch request.ActivityKind { + case bean.ActivityKindBuildTrigger: + buildTriggersTrend, err := impl.getAggregatedBuildStatusTrend(request.From, request.To, request.AggregationType) + if err != nil { + impl.logger.Errorw("error getting aggregated build status trend", "err", err) + return nil, err + } + response.BuildTriggersTrend = buildTriggersTrend + + case bean.ActivityKindDeploymentTrigger: + deploymentTriggersTrend, err := impl.getAggregatedDeploymentStatusTrend(request.From, request.To, request.AggregationType) + if err != nil { + impl.logger.Errorw("error getting aggregated deployment status trend", "err", err) + return nil, err + } + response.DeploymentTriggersTrend = deploymentTriggersTrend + + case bean.ActivityKindAvgBuildTime: + avgBuildTimeTrend, err := 
impl.getAggregatedBuildTimeTrend(request.From, request.To, request.AggregationType) + if err != nil { + impl.logger.Errorw("error getting aggregated build time trend", "err", err) + return nil, err + } + response.AvgBuildTimeTrend = avgBuildTimeTrend + + default: + return nil, fmt.Errorf("invalid activityKind: %s", request.ActivityKind) + } + + return response, nil +} + +func (impl *AppManagementServiceImpl) getProjectMetrics(ctx context.Context, from, to *time.Time) (*bean.ProjectMetrics, error) { + teams, err := impl.teamRepository.FindAllActiveInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting projects", "err", err) + return nil, err + } + + details := make([]bean.EntityMetadata, 0, len(teams)) + for _, team := range teams { + details = append(details, bean.EntityMetadata{ + Name: team.Name, + CreatedOn: team.CreatedOn, + }) + } + + return &bean.ProjectMetrics{ + Total: len(teams), + Details: details, + }, nil +} + +func (impl *AppManagementServiceImpl) getAppMetrics(ctx context.Context, from, to *time.Time) (*bean.AppMetrics, error) { + // Get normal apps (CI/CD apps with appType = 0) with details in time range + devtronApps, err := impl.appRepository.FindAllActiveDevtronAppsInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting all devtron apps", "err", err) + return nil, err + } + + normalAppsDetails := make([]bean.EntityMetadata, 0, len(devtronApps)) + for _, app := range devtronApps { + normalAppsDetails = append(normalAppsDetails, bean.EntityMetadata{ + Name: app.AppName, + CreatedOn: app.CreatedOn, + }) + } + + // Get chart store apps (external apps with appType = 1) with details in time range + chartStoreApps, err := impl.appRepository.FindAllActiveChartStoreAppsInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting all chart store apps", "err", err) + return nil, err + } + + chartStoreAppsDetails := make([]bean.EntityMetadata, 0, len(chartStoreApps)) + for _, app := range chartStoreApps { + chartStoreAppsDetails = append(chartStoreAppsDetails, bean.EntityMetadata{ + Name: app.AppName, + CreatedOn: app.CreatedOn, + }) + } + + totalApps := len(devtronApps) + len(chartStoreApps) + + return &bean.AppMetrics{ + Total: totalApps, + YourApps: &bean.AppTypeMetrics{ + Total: len(devtronApps), + Details: normalAppsDetails, + }, + ThirdPartyApps: &bean.AppTypeMetrics{ + Total: len(chartStoreApps), + Details: chartStoreAppsDetails, + }, + }, nil +} + +func (impl *AppManagementServiceImpl) getEnvironmentMetrics(ctx context.Context, from, to *time.Time) (*bean.EnvironmentMetrics, error) { + environments, err := impl.environmentRepository.FindAllActiveInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting environments", "err", err) + return nil, err + } + + details := make([]bean.EntityMetadata, 0, len(environments)) + for _, env := range environments { + details = append(details, bean.EntityMetadata{ + Name: env.Name, + CreatedOn: env.CreatedOn, + }) + } + + return &bean.EnvironmentMetrics{ + Total: len(environments), + Details: details, + }, nil +} + +func (impl *AppManagementServiceImpl) getBuildPipelineMetrics(ctx context.Context, from, to *time.Time) (*bean.BuildPipelineMetrics, error) { + // Get counts directly instead of fetching full structs + normalCiCount, err := impl.ciPipelineRepository.GetActiveCiPipelineCountInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error getting normal CI pipelines count", "err", err) + return nil, err + } + + externalCiCount, err := 
impl.ciPipelineRepository.GetActiveExternalCiPipelineCountInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error getting external CI pipelines count", "err", err) + return nil, err + } + + // For details, we still need to fetch some data, but only if details are actually needed + // For now, we'll provide empty details arrays since the main use case is just counts + var normalPipelines []bean.EntityMetadata + var externalPipelines []bean.EntityMetadata + + total := normalCiCount + externalCiCount + + return &bean.BuildPipelineMetrics{ + Total: total, + NormalCiPipelines: &bean.CiPipelineTypeMetrics{ + Total: normalCiCount, + Details: normalPipelines, // Empty for performance - can be populated if needed + }, + ExternalCiPipelines: &bean.CiPipelineTypeMetrics{ + Total: externalCiCount, + Details: externalPipelines, // Empty for performance - can be populated if needed + }, + }, nil +} + +func (impl *AppManagementServiceImpl) getCdPipelineMetrics(ctx context.Context, from, to *time.Time) (*bean.CdPipelineMetrics, error) { + // Get counts directly instead of fetching full structs + prodCount, err := impl.pipelineRepository.GetActivePipelineCountByEnvironmentTypeInTimeRange(true, from, to) + if err != nil { + impl.logger.Errorw("error getting production pipelines count", "err", err) + return nil, err + } + + nonProdCount, err := impl.pipelineRepository.GetActivePipelineCountByEnvironmentTypeInTimeRange(false, from, to) + if err != nil { + impl.logger.Errorw("error getting non-production pipelines count", "err", err) + return nil, err + } + + // For details, we provide empty arrays since the main use case is just counts + var prodDetails []bean.EntityMetadata + var nonProdDetails []bean.EntityMetadata + + total := prodCount + nonProdCount + + return &bean.CdPipelineMetrics{ + Total: total, + Production: &bean.PipelineEnvironmentMetrics{ + Total: prodCount, + Details: prodDetails, // Empty for performance - can be populated if needed + }, + NonProduction: &bean.PipelineEnvironmentMetrics{ + Total: nonProdCount, + Details: nonProdDetails, // Empty for performance - can be populated if needed + }, + }, nil +} + +func (impl *AppManagementServiceImpl) getEnvironmentTrendMetrics(ctx context.Context, from, to *time.Time, aggregationType constants.AggregationType) (*bean.EnvironmentTrendMetrics, error) { + // Get aggregated environment trend data + trendData, err := impl.environmentRepository.GetAggregatedEnvironmentTrendWithParams(from, to, aggregationType) + if err != nil { + impl.logger.Errorw("error getting environment trend data", "err", err) + return nil, err + } + + // Calculate total + total := 0 + for _, data := range trendData { + total += data.Count + } + + return &bean.EnvironmentTrendMetrics{ + Total: total, + Trend: trendData, + }, nil +} + +func (impl *AppManagementServiceImpl) getAggregatedBuildStatusTrend(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.BuildStatusDataPoint, error) { + workflows, err := impl.ciWorkflowRepository.GetCIBuildsForStatusTrend(from, to) + if err != nil { + impl.logger.Errorw("error fetching CI builds for status trend", "err", err) + return nil, err + } + + statusMap := make(map[string]map[string]int) // timeKey -> status -> count + + targetLocation := from.Location() + + for _, workflow := range workflows { + // Convert UTC workflow.StartedOn to the target timezone for proper time bucketing + localStartedOn := workflow.StartedOn.In(targetLocation) + + var timeKey string + if aggregationType == constants.AggregateByHour { + 
timeKey = localStartedOn.Truncate(time.Hour).Format("2006-01-02T15:04:05Z") + } else if aggregationType == constants.AggregateByMonth { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else { + timeKey = localStartedOn.Truncate(24 * time.Hour).Format("2006-01-02T15:04:05Z") + } + + if statusMap[timeKey] == nil { + statusMap[timeKey] = make(map[string]int) + } + + // Categorize status + switch workflow.Status { + case "Succeeded": + statusMap[timeKey]["successful"]++ + case "Failed", "Error", "Cancelled", "CANCELLED": + statusMap[timeKey]["failed"]++ + } + statusMap[timeKey]["total"]++ + } + var trendData []bean.BuildStatusDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.BuildStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + // Generate monthly series + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.BuildStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Generate daily series + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.BuildStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData, nil +} + +// Helper method to get aggregated deployment status trend with success/failed breakdown +func (impl *AppManagementServiceImpl) getAggregatedDeploymentStatusTrend(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.DeploymentStatusDataPoint, error) { + // Fetch all deployment workflows in the date range from repository + deployments, err := impl.cdWorkflowRepository.GetDeploymentWorkflowsForStatusTrend(from, to) + if err != nil { + impl.logger.Errorw("error fetching deployment workflows for status trend", "err", err) + return nil, err + } + + // Group deployments by time period and count statuses + statusMap := make(map[string]map[string]int) // timeKey -> status -> count + + // Get the timezone from the from/to parameters for proper time bucketing + targetLocation := from.Location() + + for _, deployment := range deployments { + // Convert UTC deployment.StartedOn to the target timezone for proper time bucketing + localStartedOn := deployment.StartedOn.In(targetLocation) + 
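+		// NOTE: as in the build trend above, hourly/daily keys come from Truncate, which
+		// rounds down on the absolute (UTC-anchored) timeline; for non-UTC target locations
+		// the daily keys may not align with the series keys generated later via time.Date,
+		// so both sides must keep to a single bucketing convention.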
+ var timeKey string + if aggregationType == constants.AggregateByHour { + timeKey = localStartedOn.Truncate(time.Hour).Format("2006-01-02T15:04:05Z") + } else if aggregationType == constants.AggregateByMonth { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else { + timeKey = localStartedOn.Truncate(24 * time.Hour).Format("2006-01-02T15:04:05Z") + } + + if statusMap[timeKey] == nil { + statusMap[timeKey] = make(map[string]int) + } + + // Categorize status + switch deployment.Status { + case "Succeeded": + statusMap[timeKey]["successful"]++ + case "Failed", "Error", "Cancelled", "CANCELLED": + statusMap[timeKey]["failed"]++ + } + statusMap[timeKey]["total"]++ + } + // Generate complete time series and populate with counts + var trendData []bean.DeploymentStatusDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.DeploymentStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + // Generate monthly series + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.DeploymentStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Generate daily series + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.DeploymentStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData, nil +} + +func (impl *AppManagementServiceImpl) getAggregatedBuildTimeTrend(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.BuildTimeDataPoint, error) { + workflows, err := impl.getSuccessfulBuildsFromStages(from, to) + if err != nil { + impl.logger.Errorw("error fetching successful workflows from stages", "err", err) + // Fallback to original method if new method fails + impl.logger.Infow("falling back to original method for build time trend calculation") + workflows, err = impl.ciWorkflowRepository.GetSuccessfulCIBuildsForBuildTime(from, to) + if err != nil { + impl.logger.Errorw("error fetching successful workflows (fallback)", "err", err) + return nil, err + } + } + + // Calculate build times and group by time period + buildTimeMap := make(map[string][]float64) + + // Get the timezone from the from/to parameters for proper 
time bucketing + targetLocation := from.Location() + + for _, workflow := range workflows { + // Calculate build time in minutes + duration := workflow.FinishedOn.Sub(workflow.StartedOn) + buildTimeMinutes := duration.Minutes() + + // Convert UTC workflow.StartedOn to the target timezone for proper time bucketing + localStartedOn := workflow.StartedOn.In(targetLocation) + + var timeKey string + if aggregationType == constants.AggregateByHour { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), localStartedOn.Hour(), 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else if aggregationType == constants.AggregateByMonth { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else { + // For daily aggregation, get midnight of the local date + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } + + buildTimeMap[timeKey] = append(buildTimeMap[timeKey], buildTimeMinutes) + } + // Generate complete time series and calculate averages + var trendData []bean.BuildTimeDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + avgBuildTime := 0.0 + + if buildTimes, exists := buildTimeMap[timeKey]; exists && len(buildTimes) > 0 { + sum := 0.0 + for _, bt := range buildTimes { + sum += bt + } + avgBuildTime = sum / float64(len(buildTimes)) + } + + trendData = append(trendData, bean.BuildTimeDataPoint{ + Timestamp: current, + AverageBuildTime: math.Round(avgBuildTime*100) / 100, // Round to 2 decimal places + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + // Generate monthly series + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + avgBuildTime := 0.0 + + if buildTimes, exists := buildTimeMap[timeKey]; exists && len(buildTimes) > 0 { + sum := 0.0 + for _, bt := range buildTimes { + sum += bt + } + avgBuildTime = sum / float64(len(buildTimes)) + } + + trendData = append(trendData, bean.BuildTimeDataPoint{ + Timestamp: current, + AverageBuildTime: math.Round(avgBuildTime*100) / 100, // Round to 2 decimal places + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Generate daily series + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + avgBuildTime := 0.0 + + if buildTimes, exists := buildTimeMap[timeKey]; exists && len(buildTimes) > 0 { + sum := 0.0 + for _, bt := range buildTimes { + sum += bt + } + avgBuildTime = sum / float64(len(buildTimes)) + } + + trendData = append(trendData, bean.BuildTimeDataPoint{ + Timestamp: current, + AverageBuildTime: math.Round(avgBuildTime*100) / 100, // Round to 2 decimal places + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + 
return trendData, nil +} + +// WorkflowMetrics holds aggregated workflow metrics for a time period +type WorkflowMetrics struct { + BuildPipelinesCount int // CI pipelines count + ProductionPipelinesCount int // Production deployment pipelines count + NonProdPipelinesCount int // Non-production deployment pipelines count + ExternalCICount int + ScanningEnabledPercentage float64 // Percentage of build pipelines where scanning is enabled + GitOpsComplianceCount int // Count of GitOps enabled pipelines + GitOpsCoveragePercentage float64 // Percentage of GitOps coverage +} + +func (impl *AppManagementServiceImpl) fetchAllWorkflowMetrics(ctx context.Context) (*WorkflowMetrics, error) { + buildPipelinesCount, err := impl.ciPipelineRepository.GetActiveCiPipelineCount() + if err != nil { + impl.logger.Errorw("error getting build pipelines count", "err", err) + return nil, err + } + + productionPipelinesCount, err := impl.pipelineRepository.GetPipelineCountByEnvironmentType(true) + if err != nil { + impl.logger.Errorw("error getting production pipelines count", "err", err) + return nil, err + } + + nonProdPipelinesCount, err := impl.pipelineRepository.GetPipelineCountByEnvironmentType(false) + if err != nil { + impl.logger.Errorw("error getting non-production pipelines count", "err", err) + return nil, err + } + + // Get scanning enabled count directly + scanningEnabledCount, err := impl.ciPipelineRepository.GetScanEnabledCiPipelineCount() + if err != nil { + impl.logger.Errorw("error getting scanning enabled count", "err", err) + return nil, err + } + + // Calculate scanning enabled percentage + var scanningEnabledPercentage float64 + if buildPipelinesCount > 0 { + scanningEnabledPercentage = (float64(scanningEnabledCount) / float64(buildPipelinesCount)) * 100 + } + + // Get external CI count directly + externalCICount, err := impl.ciPipelineRepository.GetActiveExternalCiPipelineCount() + if err != nil { + impl.logger.Errorw("error getting external CI count", "err", err) + return nil, err + } + + // Get GitOps compliance count directly + gitOpsComplianceCount, err := impl.deploymentConfigRepository.GetGitOpsEnabledPipelineCount() + if err != nil { + impl.logger.Errorw("error getting GitOps compliance count", "err", err) + return nil, err + } + + // Calculate GitOps coverage percentage + totalActivePipelines := productionPipelinesCount + nonProdPipelinesCount + var gitOpsCoveragePercentage float64 + if totalActivePipelines > 0 { + gitOpsCoveragePercentage = (float64(gitOpsComplianceCount) / float64(totalActivePipelines)) * 100 + } + + metrics := &WorkflowMetrics{ + BuildPipelinesCount: buildPipelinesCount, + ProductionPipelinesCount: productionPipelinesCount, + NonProdPipelinesCount: nonProdPipelinesCount, + ExternalCICount: externalCICount, + ScanningEnabledPercentage: scanningEnabledPercentage, + GitOpsComplianceCount: gitOpsComplianceCount, + GitOpsCoveragePercentage: gitOpsCoveragePercentage, + } + + return metrics, nil +} + +func (impl *AppManagementServiceImpl) buildWorkflowOverviewResponse(allTime *WorkflowMetrics) *bean.WorkflowOverviewResponse { + allTimeAllDeployments := allTime.ProductionPipelinesCount + allTime.NonProdPipelinesCount + + scanningMetric := &bean.AtAGlanceMetric{ + Percentage: allTime.ScanningEnabledPercentage, + } + + gitOpsMetric := &bean.AtAGlanceMetric{ + Total: allTime.GitOpsComplianceCount, // All-time count + Percentage: allTime.GitOpsCoveragePercentage, + } + + productionMetric := &bean.AtAGlanceMetric{ + Total: allTime.ProductionPipelinesCount, // All-time count + 
} + + return &bean.WorkflowOverviewResponse{ + BuildPipelines: &bean.AtAGlanceMetric{ + Total: allTime.BuildPipelinesCount, + }, + ExternalImageSource: &bean.AtAGlanceMetric{ + Total: allTime.ExternalCICount, + }, + AllDeploymentPipelines: &bean.AtAGlanceMetric{ + Total: allTimeAllDeployments, + }, + ScanningEnabledInWorkflows: scanningMetric, + GitOpsComplianceProdPipelines: gitOpsMetric, + ProductionPipelines: productionMetric, + } +} + +// calculateAverageBuildTime calculates the average build time for successful CI_BUILD pipelines +// in the given time range using accurate timing data from workflow_execution_stage table. +// This provides more precise build times by using the actual start_time and end_time from +// the Execution stage where workflow_type=CI, stage_name=Execution, status=SUCCEEDED, status_for=workflow. +func (impl *AppManagementServiceImpl) calculateAverageBuildTime(from, to *time.Time) (float64, error) { + // Fetch successful builds from workflow_execution_stage table for accurate timing + successfulBuilds, err := impl.getSuccessfulBuildsFromStages(from, to) + if err != nil { + impl.logger.Errorw("error getting successful builds for build time calculation from stages", "from", from, "to", to, "err", err) + // Fallback to original method if new method fails + impl.logger.Infow("falling back to original method for build time calculation") + successfulBuilds, err = impl.ciWorkflowRepository.GetSuccessfulCIBuildsForBuildTime(from, to) + if err != nil { + impl.logger.Errorw("error getting successful builds for build time calculation (fallback)", "from", from, "to", to, "err", err) + return 0, err + } + } + + // Return 0 if no successful builds found + if len(successfulBuilds) == 0 { + impl.logger.Infow("no successful builds found for average build time calculation", "from", from, "to", to) + return 0, nil + } + + // Calculate average build time in code for better precision and error handling + totalBuildTimeMinutes := float64(0) + validBuilds := 0 + + for _, build := range successfulBuilds { + // Ensure both timestamps are valid + if !build.StartedOn.IsZero() && !build.FinishedOn.IsZero() && build.FinishedOn.After(build.StartedOn) { + // Calculate duration in minutes with millisecond precision + duration := build.FinishedOn.Sub(build.StartedOn) + buildTimeMinutes := duration.Minutes() + + // Only include positive build times (sanity check) + if buildTimeMinutes > 0 { + totalBuildTimeMinutes += buildTimeMinutes + validBuilds++ + } + } + } + + // Calculate average if we have valid builds + avgBuildTime := float64(0) + if validBuilds > 0 { + avgBuildTime = totalBuildTimeMinutes / float64(validBuilds) + } + + return avgBuildTime, nil +} + +// getSuccessfulBuildsFromStages fetches successful CI builds from workflow_execution_stage table +// and converts them to WorkflowBuildTime format for compatibility with existing logic +func (impl *AppManagementServiceImpl) getSuccessfulBuildsFromStages(from, to *time.Time) ([]pipelineConfig.WorkflowBuildTime, error) { + stages, err := impl.workflowStageRepository.GetSuccessfulCIExecutionStages(from, to) + if err != nil { + return nil, err + } + + var workflows []pipelineConfig.WorkflowBuildTime + for _, stage := range stages { + startTime, err := impl.parseTimeString(stage.StartTime) + if err != nil { + impl.logger.Warnw("failed to parse start_time, skipping stage", "workflowId", stage.WorkflowId, "startTime", stage.StartTime, "err", err) + continue + } + + endTime, err := impl.parseTimeString(stage.EndTime) + if err != nil { + 
impl.logger.Warnw("failed to parse end_time, skipping stage", "workflowId", stage.WorkflowId, "endTime", stage.EndTime, "err", err) + continue + } + + if !endTime.After(startTime) { + impl.logger.Warnw("end_time is not after start_time, skipping stage", "workflowId", stage.WorkflowId, "startTime", startTime, "endTime", endTime) + continue + } + + workflows = append(workflows, pipelineConfig.WorkflowBuildTime{ + StartedOn: startTime, + FinishedOn: endTime, + }) + } + + return workflows, nil +} + +// parseTimeString parses time string that can be either ISO format or Unix timestamp +func (impl *AppManagementServiceImpl) parseTimeString(timeStr string) (time.Time, error) { + if timeStr == "" { + return time.Time{}, fmt.Errorf("empty time string") + } + + // Try parsing as ISO format first (e.g., "2024-01-15T10:30:45Z") + if t, err := time.Parse(time.RFC3339, timeStr); err == nil { + return t, nil + } + + // Try parsing as Unix timestamp in milliseconds + if timestamp, err := strconv.ParseInt(timeStr, 10, 64); err == nil { + return time.Unix(timestamp/1000, (timestamp%1000)*1000000), nil + } + + return time.Time{}, fmt.Errorf("unable to parse time string: %s", timeStr) +} diff --git a/pkg/overview/ClusterOverviewAdapter.go b/pkg/overview/ClusterOverviewAdapter.go new file mode 100644 index 0000000000..9c6856ebe2 --- /dev/null +++ b/pkg/overview/ClusterOverviewAdapter.go @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package overview + +import ( + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + "github.com/devtron-labs/devtron/pkg/overview/util" +) + +// NewClusterOverviewResponse creates and initializes a new ClusterOverviewResponse with default values +func NewClusterOverviewResponse(totalClusters int) *bean.ClusterOverviewResponse { + return &bean.ClusterOverviewResponse{ + TotalClusters: totalClusters, + TotalCpuCapacity: NewResourceCapacity("0", "cores"), + TotalMemoryCapacity: NewResourceCapacity("0", "Gi"), + ClusterStatusBreakdown: NewClusterStatusBreakdown(), + NodeSchedulingBreakdown: NewNodeSchedulingBreakdown(), + NodeErrorBreakdown: NewNodeErrorBreakdown(), + ClusterDistribution: NewClusterDistribution(), + ClusterCapacityDistribution: []bean.ClusterCapacityDistribution{}, + NodeDistribution: NewNodeDistribution(), + } +} + +// NewEmptyClusterOverviewResponse creates an empty ClusterOverviewResponse with zero values +func NewEmptyClusterOverviewResponse() *bean.ClusterOverviewResponse { + return NewClusterOverviewResponse(0) +} + +// NewResourceCapacity creates a new ResourceCapacity with the given value and unit +func NewResourceCapacity(value, unit string) *bean.ResourceCapacity { + return &bean.ResourceCapacity{ + Value: value, + Unit: unit, + } +} + +// NewClusterStatusBreakdown creates a new ClusterStatusBreakdown with zero values +func NewClusterStatusBreakdown() *bean.ClusterStatusBreakdown { + return &bean.ClusterStatusBreakdown{ + Healthy: 0, + Unhealthy: 0, + ConnectionFailed: 0, + } +} + +// NewNodeSchedulingBreakdown creates a new NodeSchedulingBreakdown with initialized slices +func NewNodeSchedulingBreakdown() *bean.NodeSchedulingBreakdown { + return &bean.NodeSchedulingBreakdown{ + Schedulable: 0, + Unschedulable: 0, + Total: 0, + SchedulableNodes: []bean.NodeSchedulingDetail{}, + UnschedulableNodes: []bean.NodeSchedulingDetail{}, + } +} + +// NewNodeErrorBreakdown creates a new NodeErrorBreakdown with initialized error counts map +func NewNodeErrorBreakdown() 
*bean.NodeErrorBreakdown { + errorCounts := make(map[string]int) + // Initialize all error types with zero counts + errorCounts[constants.NodeErrorNetworkUnavailable] = 0 + errorCounts[constants.NodeErrorMemoryPressure] = 0 + errorCounts[constants.NodeErrorDiskPressure] = 0 + errorCounts[constants.NodeErrorPIDPressure] = 0 + errorCounts[constants.NodeErrorKubeletNotReady] = 0 + errorCounts[constants.NodeErrorOthers] = 0 + + return &bean.NodeErrorBreakdown{ + ErrorCounts: errorCounts, + Total: 0, + NodeErrors: []bean.NodeErrorDetail{}, + } +} + +// NewClusterDistribution creates a new ClusterDistribution with empty slices +func NewClusterDistribution() *bean.ClusterDistribution { + return &bean.ClusterDistribution{ + ByProvider: []bean.ProviderDistribution{}, + ByVersion: []bean.VersionDistribution{}, + } +} + +// NewNodeDistribution creates a new NodeDistribution with empty slices +func NewNodeDistribution() *bean.NodeDistribution { + return &bean.NodeDistribution{ + ByClusters: []bean.ClusterNodeCount{}, + ByAutoscaler: []bean.AutoscalerNodeCount{}, + } +} + +// NewClusterCapacityDistribution creates a new ClusterCapacityDistribution entry +func NewClusterCapacityDistribution(clusterID int, clusterName string, serverVersion string, cpuCapacity float64, cpuUtil, cpuRequest, cpuLimit float64, memCapacity float64, memUtil, memRequest, memLimit float64) bean.ClusterCapacityDistribution { + return bean.ClusterCapacityDistribution{ + ClusterID: clusterID, + ClusterName: clusterName, + ServerVersion: serverVersion, + CPU: NewClusterResourceMetric(cpuCapacity, cpuUtil, cpuRequest, cpuLimit), + Memory: NewClusterResourceMetric(memCapacity, memUtil, memRequest, memLimit), + } +} + +// NewClusterResourceMetric creates a new ClusterResourceMetric with capacity and percentages rounded to 2 decimal places +func NewClusterResourceMetric(capacity float64, utilPercent, requestPercent, limitPercent float64) *bean.ClusterResourceMetric { + return &bean.ClusterResourceMetric{ + Capacity: util.RoundToTwoDecimals(capacity), + UtilizationPercent: util.RoundToTwoDecimals(utilPercent), + RequestsPercent: util.RoundToTwoDecimals(requestPercent), + LimitsPercent: util.RoundToTwoDecimals(limitPercent), + } +} + +// NewClusterNodeCount creates a new ClusterNodeCount entry +func NewClusterNodeCount(clusterID int, clusterName string, nodeCount int) bean.ClusterNodeCount { + return bean.ClusterNodeCount{ + ClusterID: clusterID, + ClusterName: clusterName, + NodeCount: nodeCount, + } +} + +// NewNodeErrorDetail creates a new NodeErrorDetail entry +func NewNodeErrorDetail(nodeName, clusterName string, clusterID int, errors []string, nodeStatus string) bean.NodeErrorDetail { + return bean.NodeErrorDetail{ + NodeName: nodeName, + ClusterName: clusterName, + ClusterID: clusterID, + Errors: errors, + NodeStatus: nodeStatus, + } +} + +// NewNodeSchedulingDetail creates a new NodeSchedulingDetail entry +func NewNodeSchedulingDetail(nodeName, clusterName string, clusterID int, schedulable bool) bean.NodeSchedulingDetail { + return bean.NodeSchedulingDetail{ + NodeName: nodeName, + ClusterName: clusterName, + ClusterID: clusterID, + Schedulable: schedulable, + } +} + +// NewProviderDistribution creates a new ProviderDistribution entry +func NewProviderDistribution(provider string, count int) bean.ProviderDistribution { + return bean.ProviderDistribution{ + Provider: provider, + Count: count, + } +} + +// NewVersionDistribution creates a new VersionDistribution entry +func NewVersionDistribution(version string, count int) 
bean.VersionDistribution { + return bean.VersionDistribution{ + Version: version, + Count: count, + } +} + +// NewClusterOverviewNodeDetailedResponse creates a new ClusterOverviewNodeDetailedResponse +func NewClusterOverviewNodeDetailedResponse(totalCount int, nodeList []bean.ClusterOverviewNodeDetailedItem) *bean.ClusterOverviewNodeDetailedResponse { + return &bean.ClusterOverviewNodeDetailedResponse{ + TotalCount: totalCount, + NodeList: nodeList, + } +} + +// NewEmptyClusterOverviewNodeDetailedResponse creates an empty response for when cache is not found +func NewEmptyClusterOverviewNodeDetailedResponse() *bean.ClusterOverviewNodeDetailedResponse { + return &bean.ClusterOverviewNodeDetailedResponse{ + TotalCount: 0, + NodeList: []bean.ClusterOverviewNodeDetailedItem{}, + } +} + +// NewClusterUpgradeOverviewResponse creates a new ClusterUpgradeOverviewResponse +func NewClusterUpgradeOverviewResponse(canUpgrade bool, latestVersion string, clusterList []bean.ClusterUpgradeDetails) *bean.ClusterUpgradeOverviewResponse { + return &bean.ClusterUpgradeOverviewResponse{ + CanCurrentUserUpgrade: canUpgrade, + LatestVersion: latestVersion, + ClusterList: clusterList, + } +} diff --git a/pkg/overview/ClusterOverviewService.go b/pkg/overview/ClusterOverviewService.go new file mode 100644 index 0000000000..5484146d2d --- /dev/null +++ b/pkg/overview/ClusterOverviewService.go @@ -0,0 +1,1185 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package overview + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/blang/semver/v4" + "github.com/devtron-labs/devtron/pkg/asyncProvider" + "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" + clusterService "github.com/devtron-labs/devtron/pkg/cluster" + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" + "github.com/devtron-labs/devtron/pkg/k8s" + capacityService "github.com/devtron-labs/devtron/pkg/k8s/capacity" + capacityBean "github.com/devtron-labs/devtron/pkg/k8s/capacity/bean" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/cache" + "github.com/devtron-labs/devtron/pkg/overview/config" + "github.com/devtron-labs/devtron/pkg/overview/constants" + overviewUtil "github.com/devtron-labs/devtron/pkg/overview/util" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +// ClusterOverviewService provides cluster management overview functionality +type ClusterOverviewService interface { + GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error) + GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) + RefreshClusterOverviewCache(ctx context.Context) error +} + +// ClusterOverviewServiceImpl implements ClusterOverviewService +type ClusterOverviewServiceImpl struct { + logger *zap.SugaredLogger + clusterService clusterService.ClusterService + k8sCapacityService capacityService.K8sCapacityService + clusterCacheService cache.ClusterCacheService + k8sCommonService k8s.K8sCommonService + enforcer casbin.Enforcer + config *config.ClusterOverviewConfig +} + +func NewClusterOverviewServiceImpl( + logger *zap.SugaredLogger, + clusterService clusterService.ClusterService, + k8sCapacityService capacityService.K8sCapacityService, + clusterCacheService cache.ClusterCacheService, + k8sCommonService k8s.K8sCommonService, + enforcer casbin.Enforcer, + cfg *config.ClusterOverviewConfig, +) *ClusterOverviewServiceImpl { + service := 
&ClusterOverviewServiceImpl{ + logger: logger, + clusterService: clusterService, + k8sCapacityService: k8sCapacityService, + clusterCacheService: clusterCacheService, + k8sCommonService: k8sCommonService, + enforcer: enforcer, + config: cfg, + } + + // Start background refresh worker if enabled + if cfg.CacheEnabled && cfg.BackgroundRefreshEnabled { + ctx := context.Background() + service.StartBackgroundRefresh(ctx) + logger.Info("Background cache refresh worker started") + } else { + logger.Info("Background cache refresh worker disabled") + } + + return service +} + +// StartBackgroundRefresh starts the background cache refresh worker +func (impl *ClusterOverviewServiceImpl) StartBackgroundRefresh(ctx context.Context) { + if !impl.config.CacheEnabled || !impl.config.BackgroundRefreshEnabled { + impl.logger.Info("Background refresh disabled") + return + } + + impl.logger.Infow("Starting background cache refresh worker", + "refreshInterval", impl.config.GetRefreshInterval(), + "maxParallelClusters", impl.config.MaxParallelClusters) + + // Initial cache population + go func() { + impl.logger.Info("Performing initial cache population") + if err := impl.refreshCache(ctx); err != nil { + impl.logger.Errorw("initial cache population failed", "err", err) + } + }() + + // Start periodic refresh + ticker := time.NewTicker(impl.config.GetRefreshInterval()) + go func() { + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + impl.logger.Info("background refresh worker stopped") + return + case <-ticker.C: + impl.logger.Info("background refresh triggered") + if err := impl.refreshCache(ctx); err != nil { + impl.logger.Errorw("background cache refresh failed", "err", err) + } + } + } + }() +} + +// RefreshClusterOverviewCache manually triggers a cache refresh +func (impl *ClusterOverviewServiceImpl) RefreshClusterOverviewCache(ctx context.Context) error { + return impl.refreshCache(ctx) +} + +// refreshCache fetches fresh data and updates cache +func (impl *ClusterOverviewServiceImpl) refreshCache(ctx context.Context) error { + // Prevent concurrent refreshes + if impl.clusterCacheService.IsRefreshing() { + impl.logger.Debug("Cache refresh already in progress, skipping") + return nil + } + + impl.clusterCacheService.SetRefreshing(true) + defer impl.clusterCacheService.SetRefreshing(false) + + startTime := time.Now() + impl.logger.Debug("Starting cache refresh") + + // Fetch clusters + clusters, err := impl.clusterService.FindActiveClustersExcludingVirtual() + if err != nil { + impl.logger.Errorw("error fetching clusters for cache refresh", "err", err) + return err + } + + // Fetch cluster data in parallel + response, err := impl.fetchClusterDataParallel(ctx, clusters) + if err != nil { + impl.logger.Errorw("error fetching cluster data for cache refresh", "err", err) + return err + } + + // Update cache + if err := impl.clusterCacheService.SetClusterOverview(response); err != nil { + impl.logger.Errorw("error updating cache", "err", err) + return err + } + + duration := time.Since(startTime) + impl.logger.Infow("Cache refresh completed", "duration", duration, "clusterCount", len(clusters), "totalClusters", response.TotalClusters) + + return nil +} + +// fetchClusterDataParallel fetches cluster data using worker pool for parallel execution +func (impl *ClusterOverviewServiceImpl) fetchClusterDataParallel(ctx context.Context, clusters []clusterBean.ClusterBean) (*bean.ClusterOverviewResponse, error) { + if len(clusters) == 0 { + return NewEmptyClusterOverviewResponse(), nil + } + + // Separate 
clusters into valid and error clusters + validClusters := make([]clusterBean.ClusterBean, 0, len(clusters)) + errorClusters := make([]clusterBean.ClusterBean, 0) + + for _, cluster := range clusters { + if len(cluster.ErrorInConnecting) > 0 { + impl.logger.Debugw("Skipping cluster with connection error", "clusterId", cluster.Id, "clusterName", cluster.ClusterName, "error", cluster.ErrorInConnecting) + errorClusters = append(errorClusters, cluster) + continue + } + validClusters = append(validClusters, cluster) + } + + if len(errorClusters) > 0 { + impl.logger.Infow("Skipped clusters with connection errors", "skippedCount", len(errorClusters), "validCount", len(validClusters), "totalCount", len(clusters)) + } + + // Create placeholder capacity details for clusters with errors + errorClusterDetails := make([]*capacityBean.ClusterCapacityDetail, 0, len(errorClusters)) + for _, cluster := range errorClusters { + errorClusterDetails = append(errorClusterDetails, &capacityBean.ClusterCapacityDetail{ + Id: cluster.Id, + Name: cluster.ClusterName, + ErrorInConnection: cluster.ErrorInConnecting, + Status: capacityBean.ClusterStatusConnectionFailed, + IsVirtualCluster: cluster.IsVirtualCluster, + IsProd: cluster.IsProd, + }) + } + + // If all clusters have connection errors, return response with error clusters only + if len(validClusters) == 0 { + impl.logger.Warn("All clusters have connection errors, returning response with error clusters only") + allClusterPointers := make([]*clusterBean.ClusterBean, len(clusters)) + for i := range clusters { + allClusterPointers[i] = &clusters[i] + } + return impl.aggregateClusterCapacityDetails(ctx, errorClusterDetails, allClusterPointers), nil + } + + // Create worker pool with configured parallelism + wp := asyncProvider.NewBatchWorker[*capacityBean.ClusterCapacityDetail]( + impl.config.MaxParallelClusters, + impl.logger, + ) + wp.InitializeResponse() + + // Convert to pointer slice (only valid clusters) + clusterPointers := make([]*clusterBean.ClusterBean, len(validClusters)) + for i := range validClusters { + clusterPointers[i] = &validClusters[i] + } + + // Submit cluster fetch tasks to worker pool + for _, cluster := range clusterPointers { + clusterCopy := cluster // Capture for closure + wp.Submit(func() (*capacityBean.ClusterCapacityDetail, error) { + impl.logger.Debugw("Fetching cluster capacity", "clusterId", clusterCopy.Id, "clusterName", clusterCopy.ClusterName) + + // Fetch cluster capacity detail + detail, err := impl.k8sCapacityService.GetClusterCapacityDetail(ctx, clusterCopy, false) + if err != nil { + impl.logger.Warnw("error fetching cluster capacity, skipping", "clusterId", clusterCopy.Id, "clusterName", clusterCopy.ClusterName, "err", err) + // Return error to skip this cluster + return nil, err + } + + // Set cluster metadata + detail.Id = clusterCopy.Id + detail.Name = clusterCopy.ClusterName + detail.IsVirtualCluster = clusterCopy.IsVirtualCluster + detail.IsProd = clusterCopy.IsProd + + return detail, nil + }) + } + + // Wait for all tasks to complete + if err := wp.StopWait(); err != nil { + impl.logger.Errorw("error waiting for worker pool tasks", "err", err) + // Continue anyway to return partial results + } + + // Get results from worker pool + results := wp.GetResponse() + + // Combine successful results with error cluster placeholders + allClusterDetails := make([]*capacityBean.ClusterCapacityDetail, 0, len(results)+len(errorClusterDetails)) + allClusterDetails = append(allClusterDetails, results...) 
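+ // Placeholder entries for unreachable clusters are appended after the successful results so the final overview still covers every configured cluster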
+ allClusterDetails = append(allClusterDetails, errorClusterDetails...) + + // Create combined cluster bean list (all clusters) + allClusterPointers := make([]*clusterBean.ClusterBean, len(clusters)) + for i := range clusters { + allClusterPointers[i] = &clusters[i] + } + + // Log summary + successCount := len(results) + failedCount := len(validClusters) - successCount + if failedCount > 0 || len(errorClusters) > 0 { + impl.logger.Infow("Cluster fetch summary", "successCount", successCount, "failedCount", failedCount, "skippedCount", len(errorClusters), "totalClusters", len(clusters)) + } + + // Aggregate all results (including error clusters) into response + return impl.aggregateClusterCapacityDetails(ctx, allClusterDetails, allClusterPointers), nil +} + +// aggregateClusterCapacityDetails aggregates cluster capacity details into overview response +func (impl *ClusterOverviewServiceImpl) aggregateClusterCapacityDetails(ctx context.Context, details []*capacityBean.ClusterCapacityDetail, clusterBeans []*clusterBean.ClusterBean) *bean.ClusterOverviewResponse { + return impl.buildClusterOverviewResponse(ctx, details, clusterBeans) +} + +// GetClusterOverview retrieves comprehensive cluster management overview +// Returns from cache if enabled and available, otherwise fetches directly +func (impl *ClusterOverviewServiceImpl) GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error) { + // If cache is disabled, fetch directly + if !impl.config.CacheEnabled { + impl.logger.Debug("Cache disabled, fetching cluster overview directly") + return impl.fetchClusterOverviewDirect(ctx) + } + + // Try to get from cache + if cachedData, found := impl.clusterCacheService.GetClusterOverview(); found { + return impl.handleCacheHit(cachedData) + } + + // Cache miss - fallback to direct fetch + return impl.handleCacheMiss(ctx) +} + +// handleCacheHit processes a cache hit and returns the cached data +func (impl *ClusterOverviewServiceImpl) handleCacheHit(cachedData *bean.ClusterOverviewResponse) (*bean.ClusterOverviewResponse, error) { + cacheAge := impl.clusterCacheService.GetCacheAge() + + // Warn if cache is stale but return it anyway + if cacheAge > impl.config.GetMaxStaleDataDuration() { + impl.logger.Warnw("cache is stale but returning anyway", + "cacheAge", cacheAge, + "maxStaleAge", impl.config.GetMaxStaleDataDuration()) + } + + impl.logger.Infow("returning cluster overview from cache", "cacheAge", cacheAge) + return cachedData, nil +} + +// handleCacheMiss handles cache miss by attempting to refresh cache or fetching directly +func (impl *ClusterOverviewServiceImpl) handleCacheMiss(ctx context.Context) (*bean.ClusterOverviewResponse, error) { + impl.logger.Warn("cache miss - background refresh may not be running, attempting fallback") + + // Try to refresh cache synchronously + if err := impl.refreshCache(ctx); err != nil { + impl.logger.Errorw("error refreshing cache synchronously, falling back to direct fetch", "err", err) + // Fallback to direct fetch without caching + return impl.fetchClusterOverviewDirect(ctx) + } + + // Try to get from cache after refresh + if cachedData, found := impl.clusterCacheService.GetClusterOverview(); found { + impl.logger.Info("successfully populated cache, returning data") + return cachedData, nil + } + + // Cache refresh succeeded but data not in cache (shouldn't happen) + impl.logger.Warn("cache refresh succeeded but data not found in cache, falling back to direct fetch") + return impl.fetchClusterOverviewDirect(ctx) +} + +// 
fetchClusterOverviewDirect fetches cluster overview without using cache +func (impl *ClusterOverviewServiceImpl) fetchClusterOverviewDirect(ctx context.Context) (*bean.ClusterOverviewResponse, error) { + impl.logger.Debug("Fetching cluster overview directly (bypassing cache)") + + // Fetch active clusters + clusters, err := impl.clusterService.FindActiveClustersExcludingVirtual() + if err != nil { + impl.logger.Errorw("error fetching clusters", "err", err) + return nil, fmt.Errorf("failed to fetch clusters: %w", err) + } + + // Fetch cluster data in parallel + response, err := impl.fetchClusterDataParallel(ctx, clusters) + if err != nil { + impl.logger.Errorw("error fetching cluster data", "err", err) + return nil, fmt.Errorf("failed to fetch cluster data: %w", err) + } + + impl.logger.Infow("successfully fetched cluster overview directly", "totalClusters", response.TotalClusters) + return response, nil +} + +func (impl *ClusterOverviewServiceImpl) buildClusterOverviewResponse(ctx context.Context, clusterCapacityDetails []*capacityBean.ClusterCapacityDetail, clusterBeans []*clusterBean.ClusterBean) *bean.ClusterOverviewResponse { + // Initialize response using adapter + response := NewClusterOverviewResponse(len(clusterCapacityDetails)) + + // Tracking variables for aggregation + var totalCpuCapacityCores, totalMemoryCapacityGi float64 + providerCounts := make(map[string]int) + versionCounts := make(map[string]int) + autoscalerCounts := make(map[string]int) + autoscalerNodeDetailsMap := make(map[string][]bean.AutoscalerNodeDetail) + + // Create a map of cluster ID to cluster bean for quick lookup + clusterBeanMap := make(map[int]*clusterBean.ClusterBean) + for _, cb := range clusterBeans { + clusterBeanMap[cb.Id] = cb + } + + // Process each cluster to extract and aggregate data + for _, cluster := range clusterCapacityDetails { + impl.processClusterStatus(cluster, response) + + if len(cluster.ErrorInConnection) == 0 { + metrics := impl.processClusterCapacity(cluster, &totalCpuCapacityCores, &totalMemoryCapacityGi) + impl.addClusterCapacityDistribution(cluster, response, metrics) + + // Get the corresponding cluster bean for autoscaler detection + if clusterBeanForAutoscaler, exists := clusterBeanMap[cluster.Id]; exists { + impl.processNodeDistributionAndAutoscaler(ctx, cluster, clusterBeanForAutoscaler, response, autoscalerCounts, autoscalerNodeDetailsMap) + } else { + impl.logger.Warnw("cluster bean not found for autoscaler detection", + "clusterId", cluster.Id, + "clusterName", cluster.Name) + } + impl.aggregateClusterMetadata(cluster, providerCounts, versionCounts) + } + impl.processNodeDetails(cluster, response) + impl.aggregateNodeErrorCounts(cluster, response) + + } + + impl.finalizeResponse(response, totalCpuCapacityCores, totalMemoryCapacityGi, providerCounts, versionCounts, autoscalerNodeDetailsMap) + + return response +} + +// processClusterStatus updates cluster status breakdown based on cluster health +func (impl *ClusterOverviewServiceImpl) processClusterStatus(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + if cluster.Status == capacityBean.ClusterStatusHealthy { + response.ClusterStatusBreakdown.Healthy++ + } else if cluster.Status == capacityBean.ClusterStatusConnectionFailed { + response.ClusterStatusBreakdown.ConnectionFailed++ + } else { + response.ClusterStatusBreakdown.Unhealthy++ + } +} + +// clusterCapacityMetrics holds parsed capacity metrics for a cluster +type clusterCapacityMetrics struct { + cpuCapacity float64 + cpuUtil 
float64 + cpuRequest float64 + cpuLimit float64 + memoryCapacity float64 + memoryUtil float64 + memoryRequest float64 + memoryLimit float64 +} + +// processClusterCapacity extracts and aggregates CPU and memory metrics from cluster +func (impl *ClusterOverviewServiceImpl) processClusterCapacity(cluster *capacityBean.ClusterCapacityDetail, totalCpu, totalMemory *float64) clusterCapacityMetrics { + metrics := clusterCapacityMetrics{} + + // Process CPU metrics + if cluster.Cpu != nil { + cpuCapacityFloat, err := strconv.ParseFloat(cluster.Cpu.Capacity, 64) + if err != nil { + impl.logger.Errorw("error in parsing cpu capacity", "err", err, "capacity", cluster.Cpu.Capacity) + cpuCapacityFloat = 0 + } + metrics.cpuCapacity = cpuCapacityFloat + *totalCpu += cpuCapacityFloat + + metrics.cpuUtil, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Cpu.UsagePercentage, "%"), 64) + metrics.cpuRequest, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Cpu.RequestPercentage, "%"), 64) + metrics.cpuLimit, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Cpu.LimitPercentage, "%"), 64) + } + + // Process Memory metrics + if cluster.Memory != nil { + memoryCapacityStr := strings.TrimSuffix(cluster.Memory.Capacity, "Gi") + memoryCapacityFloat, err := strconv.ParseFloat(memoryCapacityStr, 64) + if err != nil { + impl.logger.Errorw("error in parsing memory capacity", "err", err, "capacity", cluster.Memory.Capacity) + memoryCapacityFloat = 0 + } + metrics.memoryCapacity = memoryCapacityFloat + *totalMemory += memoryCapacityFloat + + metrics.memoryUtil, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Memory.UsagePercentage, "%"), 64) + metrics.memoryRequest, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Memory.RequestPercentage, "%"), 64) + metrics.memoryLimit, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Memory.LimitPercentage, "%"), 64) + } + + return metrics +} + +// addClusterCapacityDistribution adds cluster capacity distribution entry to response +func (impl *ClusterOverviewServiceImpl) addClusterCapacityDistribution(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse, metrics clusterCapacityMetrics) { + response.ClusterCapacityDistribution = append(response.ClusterCapacityDistribution, + NewClusterCapacityDistribution( + cluster.Id, + cluster.Name, + cluster.ServerVersion, + metrics.cpuCapacity, + metrics.cpuUtil, + metrics.cpuRequest, + metrics.cpuLimit, + metrics.memoryCapacity, + metrics.memoryUtil, + metrics.memoryRequest, + metrics.memoryLimit, + )) +} + +// processNodeDistributionAndAutoscaler adds cluster node count to distribution and aggregates autoscaler counts across all clusters +func (impl *ClusterOverviewServiceImpl) processNodeDistributionAndAutoscaler(ctx context.Context, cluster *capacityBean.ClusterCapacityDetail, clusterBean *clusterBean.ClusterBean, response *bean.ClusterOverviewResponse, autoscalerCounts map[string]int, autoscalerNodeDetailsMap map[string][]bean.AutoscalerNodeDetail) { + // Add cluster node count to distribution + response.NodeDistribution.ByClusters = append(response.NodeDistribution.ByClusters, + NewClusterNodeCount(cluster.Id, cluster.Name, cluster.NodeCount)) + + // Fetch node details with labels to determine autoscaler types + nodeCapacityDetails, err := impl.k8sCapacityService.GetNodeCapacityDetailsListByCluster(ctx, clusterBean) + if err != nil { + impl.logger.Errorw("error fetching node capacity details for autoscaler detection, skipping autoscaler aggregation", + "clusterId", cluster.Id, + "clusterName", 
cluster.Name, + "err", err) + return + } + + // Process each node to determine autoscaler type and aggregate globally + for _, nodeDetail := range nodeCapacityDetails { + autoscalerType := overviewUtil.DetermineAutoscalerTypeFromLabelArray(nodeDetail.Labels) + + // Add to global autoscaler counts + autoscalerCounts[autoscalerType]++ + + // Collect node details for this autoscaler type globally across all clusters + autoscalerNodeDetailsMap[autoscalerType] = append(autoscalerNodeDetailsMap[autoscalerType], bean.AutoscalerNodeDetail{ + NodeName: nodeDetail.Name, + ClusterName: cluster.Name, + ClusterID: cluster.Id, + ManagedBy: autoscalerType, + }) + } +} + +// processNodeDetails processes node details to populate scheduling and error information +func (impl *ClusterOverviewServiceImpl) processNodeDetails(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + if cluster.NodeDetails == nil { + return + } + + // Build node errors map for quick lookup + nodeErrorsMap := impl.buildNodeErrorsMap(cluster.NodeErrors) + + // Process each node + for _, nodeDetail := range cluster.NodeDetails { + if errorTypes, hasErrors := nodeErrorsMap[nodeDetail.NodeName]; hasErrors { + impl.addNodeWithErrors(nodeDetail, cluster, errorTypes, response) + } else { + impl.addSchedulableNode(nodeDetail, cluster, response) + } + } +} + +// buildNodeErrorsMap creates a map of node names to their error types +func (impl *ClusterOverviewServiceImpl) buildNodeErrorsMap(nodeErrors map[corev1.NodeConditionType][]string) map[string][]string { + nodeErrorsMap := make(map[string][]string) + for conditionType, nodeNames := range nodeErrors { + for _, nodeName := range nodeNames { + errorType := impl.getHumanReadableErrorType(conditionType) + nodeErrorsMap[nodeName] = append(nodeErrorsMap[nodeName], errorType) + } + } + return nodeErrorsMap +} + +// addNodeWithErrors adds a node with errors to the error breakdown and marks it unschedulable +func (impl *ClusterOverviewServiceImpl) addNodeWithErrors(nodeDetail capacityBean.NodeDetails, cluster *capacityBean.ClusterCapacityDetail, errorTypes []string, response *bean.ClusterOverviewResponse) { + // This path is reached only when errorTypes is non-empty, so the node is always reported as "Not Ready" + nodeStatus := "Not Ready" + + // Errors are stored as an array on the detail entry + response.NodeErrorBreakdown.NodeErrors = append(response.NodeErrorBreakdown.NodeErrors, + NewNodeErrorDetail(nodeDetail.NodeName, cluster.Name, cluster.Id, errorTypes, nodeStatus)) + + response.NodeSchedulingBreakdown.UnschedulableNodes = append(response.NodeSchedulingBreakdown.UnschedulableNodes, + NewNodeSchedulingDetail(nodeDetail.NodeName, cluster.Name, cluster.Id, false)) + response.NodeSchedulingBreakdown.Unschedulable++ +} + +// addSchedulableNode adds a schedulable node to the scheduling breakdown +func (impl *ClusterOverviewServiceImpl) addSchedulableNode(nodeDetail capacityBean.NodeDetails, cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + response.NodeSchedulingBreakdown.SchedulableNodes = append(response.NodeSchedulingBreakdown.SchedulableNodes, + NewNodeSchedulingDetail(nodeDetail.NodeName, cluster.Name, cluster.Id, true)) + response.NodeSchedulingBreakdown.Schedulable++ +} + +// aggregateNodeErrorCounts aggregates node error counts by error type +func (impl *ClusterOverviewServiceImpl) aggregateNodeErrorCounts(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + for conditionType, nodeNames := range 
cluster.NodeErrors { + errorCount := len(nodeNames) + + switch conditionType { + case constants.NodeConditionNetworkUnavailable: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorNetworkUnavailable] += errorCount + case constants.NodeConditionMemoryPressure: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorMemoryPressure] += errorCount + case constants.NodeConditionDiskPressure: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorDiskPressure] += errorCount + case constants.NodeConditionPIDPressure: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorPIDPressure] += errorCount + case constants.NodeConditionReady: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorKubeletNotReady] += errorCount + default: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorOthers] += errorCount + } + } +} + +// aggregateClusterMetadata aggregates cluster metadata (provider and version) +func (impl *ClusterOverviewServiceImpl) aggregateClusterMetadata(cluster *capacityBean.ClusterCapacityDetail, providerCounts, versionCounts map[string]int) { + provider := impl.determineProviderFromCluster(cluster) + providerCounts[provider]++ + + version := impl.extractMajorMinorVersion(cluster.ServerVersion) + versionCounts[version]++ +} + +// finalizeResponse sets total values and builds distribution arrays +func (impl *ClusterOverviewServiceImpl) finalizeResponse(response *bean.ClusterOverviewResponse, totalCpu, totalMemory float64, providerCounts, versionCounts map[string]int, autoscalerNodeDetailsMap map[string][]bean.AutoscalerNodeDetail) { + // Set total capacity values with 2 decimal precision + response.TotalCpuCapacity.Value = fmt.Sprintf("%.2f", overviewUtil.RoundToTwoDecimals(totalCpu)) + response.TotalMemoryCapacity.Value = fmt.Sprintf("%.2f", overviewUtil.RoundToTwoDecimals(totalMemory)) + + // Build provider distribution + for provider, count := range providerCounts { + response.ClusterDistribution.ByProvider = append(response.ClusterDistribution.ByProvider, + NewProviderDistribution(provider, count)) + } + + // Build version distribution + for version, count := range versionCounts { + response.ClusterDistribution.ByVersion = append(response.ClusterDistribution.ByVersion, + NewVersionDistribution(version, count)) + } + + // Build autoscaler distribution - aggregated across all clusters + for autoscalerType, nodeDetails := range autoscalerNodeDetailsMap { + response.NodeDistribution.ByAutoscaler = append(response.NodeDistribution.ByAutoscaler, bean.AutoscalerNodeCount{ + AutoscalerType: autoscalerType, + NodeCount: len(nodeDetails), + NodeDetails: nodeDetails, + }) + } + + // Set total counts for breakdowns + response.NodeSchedulingBreakdown.Total = response.NodeSchedulingBreakdown.Schedulable + response.NodeSchedulingBreakdown.Unschedulable + response.NodeErrorBreakdown.Total = len(response.NodeErrorBreakdown.NodeErrors) +} + +// getHumanReadableErrorType converts Kubernetes node condition types to human-readable error types +func (impl *ClusterOverviewServiceImpl) getHumanReadableErrorType(conditionType corev1.NodeConditionType) string { + switch conditionType { + case corev1.NodeNetworkUnavailable: + return constants.NodeErrorNetworkUnavailable + case corev1.NodeMemoryPressure: + return constants.NodeErrorMemoryPressure + case corev1.NodeDiskPressure: + return constants.NodeErrorDiskPressure + case corev1.NodePIDPressure: + return constants.NodeErrorPIDPressure + case corev1.NodeReady: + return constants.NodeErrorKubeletNotReady + default: + return 
constants.NodeErrorOthers + } +} + +// determineProviderFromCluster infers the cloud provider from the cluster's node names +func (impl *ClusterOverviewServiceImpl) determineProviderFromCluster(cluster *capacityBean.ClusterCapacityDetail) string { + for _, nodeDetail := range cluster.NodeDetails { + provider := impl.determineProviderFromNodeName(nodeDetail.NodeName) + if provider != constants.ProviderUnknown { + return provider + } + } + + return constants.ProviderUnknown +} + +// determineProviderFromNodeName determines cloud provider from node name patterns +func (impl *ClusterOverviewServiceImpl) determineProviderFromNodeName(nodeName string) string { + nodeNameLower := strings.ToLower(nodeName) + + // Google Cloud Platform (GKE) patterns + // Examples: gke-shared-cluster-ci-nodes-818049c0-6knz, gke-cluster-default-pool-12345678-abcd + if strings.HasPrefix(nodeNameLower, constants.NodePrefixGKE) { + return constants.ProviderGCP + } + + // Azure (AKS) patterns + // Examples: aks-newpool-37469834-vmss000000, aks-nodepool1-12345678-vmss000001 + if strings.HasPrefix(nodeNameLower, constants.NodePrefixAKS) { + return constants.ProviderAzure + } + + // AWS (EKS) patterns + // Examples: ip-192-168-1-100.us-west-2.compute.internal, ip-10-0-1-50.ec2.internal + if strings.Contains(nodeNameLower, constants.NodePatternAWSComputeInternal) || strings.Contains(nodeNameLower, constants.NodePatternAWSEC2Internal) { + return constants.ProviderAWS + } + // EKS managed node groups: eks-nodegroup-12345678-abcd + if strings.HasPrefix(nodeNameLower, constants.NodePrefixEKS) { + return constants.ProviderAWS + } + + // Additional AWS patterns: nodes with AWS region patterns + for _, pattern := range constants.AWSRegionPatterns { + if strings.Contains(nodeNameLower, pattern) { + return constants.ProviderAWS + } + } + + // Oracle Cloud (OKE) patterns + // Examples: oke-cywiqripuyg-nsgagklgnst-st2qczvnmba-0, oke-c1a2b3c4d5e-n6f7g8h9i0j-s1k2l3m4n5o-1 + if strings.HasPrefix(nodeNameLower, constants.NodePrefixOKE) { + return constants.ProviderOracle + } + + // DigitalOcean (DOKS) patterns + // Examples: pool--, nodes often contain "digitalocean" in metadata + if strings.Contains(nodeNameLower, constants.NodePatternDigitalOcean) { + return constants.ProviderDigitalOcean + } + + // IBM Cloud (IKS) patterns + // Examples: kube--, 10.x.x.x.kube- + if strings.Contains(nodeNameLower, constants.NodePatternIBMKube) { + return constants.ProviderIBM + } + + // Alibaba Cloud (ACK) patterns + // Examples: aliyun.com-59176-test, cn-hangzhou.i-bp12h6biv9bg24lmdc2o + // Nodes often contain "aliyun" in their names or "cn-" prefix for Chinese regions + if strings.Contains(nodeNameLower, constants.NodePatternAliyun) { + return constants.ProviderAlibaba + } + // Alibaba Cloud region patterns (cn-hangzhou, cn-beijing, etc.) 
+ if strings.HasPrefix(nodeNameLower, constants.NodePatternAlibabaRegion) { + return constants.ProviderAlibaba + } + + // Additional Azure patterns: nodes with Azure region indicators + if strings.Contains(nodeNameLower, constants.NodePatternAzureVMSS) || strings.Contains(nodeNameLower, constants.NodePatternAzureScaleSets) { + return constants.ProviderAzure + } + + // Additional GCP patterns + if strings.Contains(nodeNameLower, constants.NodePatternGCP) || strings.Contains(nodeNameLower, constants.NodePatternGoogle) { + return constants.ProviderGCP + } + + return constants.ProviderUnknown +} + +// extractMajorMinorVersion extracts major.minor version from Kubernetes version string using semver +// Examples: "v1.28.3" -> "1.28", "1.29.0-gke.1234" -> "1.29", "v1.30" -> "1.30" +func (impl *ClusterOverviewServiceImpl) extractMajorMinorVersion(version string) string { + if version == "" { + return constants.VersionUnknown + } + + cleanVersion := version + if strings.HasPrefix(version, "v") { + cleanVersion = version[1:] + } + + // Parse using semver library (same as ClusterUpgradeService) + semverVersion, err := semver.Parse(cleanVersion) + if err != nil { + impl.logger.Warnw("failed to parse version using semver, falling back to string parsing", "version", version, "err", err) + // Fallback to simple string parsing if semver fails + parts := strings.Split(cleanVersion, ".") + if len(parts) >= 2 { + return fmt.Sprintf("%s.%s", parts[0], parts[1]) + } + return constants.VersionUnknown + } + + // Use the same approach as ClusterUpgradeService: TrimToMajorAndMinorVersion + return fmt.Sprintf("%d.%d", semverVersion.Major, semverVersion.Minor) +} + +// GetClusterOverviewDetailedNodeInfo retrieves paginated and filtered node details from cache based on node view group type +func (impl *ClusterOverviewServiceImpl) GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + clusterOverview, found := impl.clusterCacheService.GetClusterOverview() + if !found { + impl.logger.Warnw("cluster overview cache not found, returning empty response") + return NewEmptyClusterOverviewNodeDetailedResponse(), nil + } + + switch request.GroupBy { + case bean.NodeViewGroupTypeNodeErrors: + return impl.getNodeErrorsDetail(clusterOverview, request) + case bean.NodeViewGroupTypeNodeScheduling: + return impl.getNodeSchedulingDetail(clusterOverview, request) + case bean.NodeViewGroupTypeAutoscaler: + return impl.getAutoscalerDetail(clusterOverview, request) + default: + return nil, fmt.Errorf("invalid node view group type: %s", request.GroupBy) + } +} + +// getNodeErrorsDetail retrieves paginated and filtered node error details +func (impl *ClusterOverviewServiceImpl) getNodeErrorsDetail(clusterOverview *bean.ClusterOverviewResponse, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + // Get all node errors from cache + allNodes := clusterOverview.NodeErrorBreakdown.NodeErrors + + // Apply error type filter if specified + if request.ErrorType != "" { + allNodes = impl.filterNodeErrorsByType(allNodes, request.ErrorType) + } + + // Apply search filter + filteredNodes := impl.filterNodeErrors(allNodes, request.SearchKey) + + // Apply sorting + sortedNodes := impl.sortNodeErrors(filteredNodes, request.SortBy, request.SortOrder) + + // Apply pagination + totalCount := len(sortedNodes) + paginatedNodes := impl.paginateNodeErrors(sortedNodes, request.Offset, request.Limit) + + // Convert to 
unified response format + unifiedNodes := make([]bean.ClusterOverviewNodeDetailedItem, len(paginatedNodes)) + for i, node := range paginatedNodes { + unifiedNodes[i] = bean.ClusterOverviewNodeDetailedItem{ + NodeName: node.NodeName, + ClusterName: node.ClusterName, + ClusterID: node.ClusterID, + NodeErrors: node.Errors, + NodeStatus: node.NodeStatus, + } + } + + return NewClusterOverviewNodeDetailedResponse(totalCount, unifiedNodes), nil +} + +// getNodeSchedulingDetail retrieves paginated and filtered node scheduling details +func (impl *ClusterOverviewServiceImpl) getNodeSchedulingDetail(clusterOverview *bean.ClusterOverviewResponse, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + // Filter by schedulable type if specified + var allNodes []bean.NodeSchedulingDetail + if request.SchedulableType != "" { + switch request.SchedulableType { + case constants.SchedulableTypeSchedulable: + allNodes = clusterOverview.NodeSchedulingBreakdown.SchedulableNodes + case constants.SchedulableTypeUnschedulable: + allNodes = clusterOverview.NodeSchedulingBreakdown.UnschedulableNodes + default: + // Invalid schedulableType: fall back to all nodes, copying into a fresh slice so append never mutates the cached slice's backing array + allNodes = append(append([]bean.NodeSchedulingDetail{}, clusterOverview.NodeSchedulingBreakdown.SchedulableNodes...), clusterOverview.NodeSchedulingBreakdown.UnschedulableNodes...) + } + } else { + // No filter specified: combine schedulable and unschedulable nodes into a fresh slice to avoid appending onto the cached slice in place + allNodes = append(append([]bean.NodeSchedulingDetail{}, clusterOverview.NodeSchedulingBreakdown.SchedulableNodes...), clusterOverview.NodeSchedulingBreakdown.UnschedulableNodes...) + } + + // Apply search filter + filteredNodes := impl.filterNodeScheduling(allNodes, request.SearchKey) + + // Apply sorting + sortedNodes := impl.sortNodeScheduling(filteredNodes, request.SortBy, request.SortOrder) + + // Apply pagination + totalCount := len(sortedNodes) + paginatedNodes := impl.paginateNodeScheduling(sortedNodes, request.Offset, request.Limit) + + unifiedNodes := make([]bean.ClusterOverviewNodeDetailedItem, len(paginatedNodes)) + for i, node := range paginatedNodes { + unifiedNodes[i] = bean.ClusterOverviewNodeDetailedItem{ + NodeName: node.NodeName, + ClusterName: node.ClusterName, + ClusterID: node.ClusterID, + Schedulable: node.Schedulable, + } + } + + return NewClusterOverviewNodeDetailedResponse(totalCount, unifiedNodes), nil +} + +// getAutoscalerDetail retrieves paginated and filtered autoscaler node details +func (impl *ClusterOverviewServiceImpl) getAutoscalerDetail(clusterOverview *bean.ClusterOverviewResponse, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + // Filter by autoscaler type if specified + var allNodes []bean.AutoscalerNodeDetail + if request.AutoscalerType != "" { + // Get nodes only for the specified autoscaler type + for _, autoscalerGroup := range clusterOverview.NodeDistribution.ByAutoscaler { + if autoscalerGroup.AutoscalerType == request.AutoscalerType { + allNodes = append(allNodes, autoscalerGroup.NodeDetails...) + break + } + } + } else { + // Combine all autoscaler nodes from all autoscaler types + for _, autoscalerGroup := range clusterOverview.NodeDistribution.ByAutoscaler { + allNodes = append(allNodes, autoscalerGroup.NodeDetails...) 
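+ // With no autoscalerType filter, node details from every autoscaler group are flattened into one list before search, sort, and pagination are applied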
+ } + } + + // Apply search filter + filteredNodes := impl.filterAutoscalerNodes(allNodes, request.SearchKey) + + // Apply sorting + sortedNodes := impl.sortAutoscalerNodes(filteredNodes, request.SortBy, request.SortOrder) + + // Apply pagination + totalCount := len(sortedNodes) + paginatedNodes := impl.paginateAutoscalerNodes(sortedNodes, request.Offset, request.Limit) + + unifiedNodes := make([]bean.ClusterOverviewNodeDetailedItem, len(paginatedNodes)) + for i, node := range paginatedNodes { + unifiedNodes[i] = bean.ClusterOverviewNodeDetailedItem{ + NodeName: node.NodeName, + ClusterName: node.ClusterName, + ClusterID: node.ClusterID, + AutoscalerType: node.ManagedBy, + } + } + + return NewClusterOverviewNodeDetailedResponse(totalCount, unifiedNodes), nil +} + +// Helper methods for node error filtering, sorting, and pagination + +// filterNodeErrorsByType filters nodes by specific error type +func (impl *ClusterOverviewServiceImpl) filterNodeErrorsByType(nodes []bean.NodeErrorDetail, errorType string) []bean.NodeErrorDetail { + if errorType == "" { + return nodes + } + + var filtered []bean.NodeErrorDetail + errorTypeLower := strings.ToLower(errorType) + + for _, node := range nodes { + // Check if the node's error array contains the specified error type + for _, err := range node.Errors { + if strings.ToLower(err) == errorTypeLower { + filtered = append(filtered, node) + break // Found the error, no need to check other errors for this node + } + } + } + + return filtered +} + +func (impl *ClusterOverviewServiceImpl) filterNodeErrors(nodes []bean.NodeErrorDetail, searchKey string) []bean.NodeErrorDetail { + if searchKey == "" { + return nodes + } + + searchKey = strings.ToLower(searchKey) + var filtered []bean.NodeErrorDetail + + for _, node := range nodes { + // Check if search key matches node name, cluster name, or node status + if strings.Contains(strings.ToLower(node.NodeName), searchKey) || + strings.Contains(strings.ToLower(node.ClusterName), searchKey) || + strings.Contains(strings.ToLower(node.NodeStatus), searchKey) { + filtered = append(filtered, node) + continue + } + + // Check if search key matches any error in the errors array + for _, err := range node.Errors { + if strings.Contains(strings.ToLower(err), searchKey) { + filtered = append(filtered, node) + break // Found match, no need to check other errors + } + } + } + + return filtered +} + +func (impl *ClusterOverviewServiceImpl) sortNodeErrors(nodes []bean.NodeErrorDetail, sortBy, sortOrder string) []bean.NodeErrorDetail { + if sortBy == "" { + sortBy = constants.SortFieldNodeName // default sort + } + if sortOrder == "" { + sortOrder = constants.SortOrderAsc // default order + } + + // Create a copy to avoid modifying the original + sorted := make([]bean.NodeErrorDetail, len(nodes)) + copy(sorted, nodes) + + sort.Slice(sorted, func(i, j int) bool { + var compareResult int + + switch sortBy { + case constants.SortFieldNodeName: + compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName) + case constants.SortFieldClusterName: + compareResult = strings.Compare(sorted[i].ClusterName, sorted[j].ClusterName) + case constants.SortFieldNodeErrors: + // Sort by joining the error array for comparison + errorsI := strings.Join(sorted[i].Errors, ", ") + errorsJ := strings.Join(sorted[j].Errors, ", ") + compareResult = strings.Compare(errorsI, errorsJ) + case constants.SortFieldNodeStatus: + compareResult = strings.Compare(sorted[i].NodeStatus, sorted[j].NodeStatus) + default: + compareResult = 
strings.Compare(sorted[i].NodeName, sorted[j].NodeName) + } + + if sortOrder == constants.SortOrderDesc { + return compareResult > 0 + } + return compareResult < 0 + }) + + return sorted +} + +func (impl *ClusterOverviewServiceImpl) paginateNodeErrors(nodes []bean.NodeErrorDetail, offset, limit int) []bean.NodeErrorDetail { + if limit <= 0 { + limit = 10 // default limit + } + + start := offset + if start < 0 { + start = 0 + } + if start >= len(nodes) { + return []bean.NodeErrorDetail{} + } + + end := start + limit + if end > len(nodes) { + end = len(nodes) + } + + return nodes[start:end] +} + +// Helper methods for node scheduling filtering, sorting, and pagination + +func (impl *ClusterOverviewServiceImpl) filterNodeScheduling(nodes []bean.NodeSchedulingDetail, searchKey string) []bean.NodeSchedulingDetail { + if searchKey == "" { + return nodes + } + + searchKey = strings.ToLower(searchKey) + var filtered []bean.NodeSchedulingDetail + + for _, node := range nodes { + schedulableStr := "schedulable" + if !node.Schedulable { + schedulableStr = "unschedulable" + } + + if strings.Contains(strings.ToLower(node.NodeName), searchKey) || + strings.Contains(strings.ToLower(node.ClusterName), searchKey) || + strings.Contains(schedulableStr, searchKey) { + filtered = append(filtered, node) + } + } + + return filtered +} + +func (impl *ClusterOverviewServiceImpl) sortNodeScheduling(nodes []bean.NodeSchedulingDetail, sortBy, sortOrder string) []bean.NodeSchedulingDetail { + if sortBy == "" { + sortBy = constants.SortFieldNodeName // default sort + } + if sortOrder == "" { + sortOrder = constants.SortOrderAsc // default order + } + + // Create a copy to avoid modifying the original + sorted := make([]bean.NodeSchedulingDetail, len(nodes)) + copy(sorted, nodes) + + sort.Slice(sorted, func(i, j int) bool { + var compareResult int + + switch sortBy { + case constants.SortFieldNodeName: + compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName) + case constants.SortFieldClusterName: + compareResult = strings.Compare(sorted[i].ClusterName, sorted[j].ClusterName) + case constants.SortFieldSchedulable: + // For boolean comparison: false < true + if sorted[i].Schedulable == sorted[j].Schedulable { + compareResult = 0 + } else if sorted[i].Schedulable { + compareResult = 1 + } else { + compareResult = -1 + } + default: + compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName) + } + + if sortOrder == constants.SortOrderDesc { + return compareResult > 0 + } + return compareResult < 0 + }) + + return sorted +} + +func (impl *ClusterOverviewServiceImpl) paginateNodeScheduling(nodes []bean.NodeSchedulingDetail, offset, limit int) []bean.NodeSchedulingDetail { + if limit <= 0 { + limit = 10 // default limit + } + + start := offset + if start < 0 { + start = 0 + } + if start >= len(nodes) { + return []bean.NodeSchedulingDetail{} + } + + end := start + limit + if end > len(nodes) { + end = len(nodes) + } + + return nodes[start:end] +} + +// Helper methods for autoscaler node filtering, sorting, and pagination + +func (impl *ClusterOverviewServiceImpl) filterAutoscalerNodes(nodes []bean.AutoscalerNodeDetail, searchKey string) []bean.AutoscalerNodeDetail { + if searchKey == "" { + return nodes + } + + searchKey = strings.ToLower(searchKey) + var filtered []bean.AutoscalerNodeDetail + + for _, node := range nodes { + if strings.Contains(strings.ToLower(node.NodeName), searchKey) || + strings.Contains(strings.ToLower(node.ClusterName), searchKey) || + 
strings.Contains(strings.ToLower(node.ManagedBy), searchKey) { + filtered = append(filtered, node) + } + } + + return filtered +} + +func (impl *ClusterOverviewServiceImpl) sortAutoscalerNodes(nodes []bean.AutoscalerNodeDetail, sortBy, sortOrder string) []bean.AutoscalerNodeDetail { + if sortBy == "" { + sortBy = constants.SortFieldNodeName // default sort + } + if sortOrder == "" { + sortOrder = constants.SortOrderAsc // default order + } + + // Create a copy to avoid modifying the original + sorted := make([]bean.AutoscalerNodeDetail, len(nodes)) + copy(sorted, nodes) + + sort.Slice(sorted, func(i, j int) bool { + var compareResult int + + switch sortBy { + case constants.SortFieldNodeName: + compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName) + case constants.SortFieldClusterName: + compareResult = strings.Compare(sorted[i].ClusterName, sorted[j].ClusterName) + case constants.SortFieldAutoscalerType: + compareResult = strings.Compare(sorted[i].ManagedBy, sorted[j].ManagedBy) + default: + compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName) + } + + if sortOrder == constants.SortOrderDesc { + return compareResult > 0 + } + return compareResult < 0 + }) + + return sorted +} + +func (impl *ClusterOverviewServiceImpl) paginateAutoscalerNodes(nodes []bean.AutoscalerNodeDetail, offset, limit int) []bean.AutoscalerNodeDetail { + if limit <= 0 { + limit = 10 // default limit + } + + start := offset + if start < 0 { + start = 0 + } + if start >= len(nodes) { + return []bean.AutoscalerNodeDetail{} + } + + end := start + limit + if end > len(nodes) { + end = len(nodes) + } + + return nodes[start:end] +} diff --git a/pkg/overview/DoraMetricsService.go b/pkg/overview/DoraMetricsService.go new file mode 100644 index 0000000000..cfae44b40a --- /dev/null +++ b/pkg/overview/DoraMetricsService.go @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package overview + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/devtron-labs/devtron/client/lens" + "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/util" + "go.uber.org/zap" +) + +type DoraMetricsService interface { + GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error) +} + +type DoraMetricsServiceImpl struct { + logger *zap.SugaredLogger + lensClient lens.LensClient + appRepository app.AppRepository + pipelineRepository pipelineConfig.PipelineRepository + environmentRepository repository.EnvironmentRepository + cdWorkflowRepository pipelineConfig.CdWorkflowRepository +} + +func NewDoraMetricsServiceImpl( + logger *zap.SugaredLogger, + lensClient lens.LensClient, + appRepository app.AppRepository, + pipelineRepository pipelineConfig.PipelineRepository, + environmentRepository repository.EnvironmentRepository, + cdWorkflowRepository pipelineConfig.CdWorkflowRepository, +) *DoraMetricsServiceImpl { + return &DoraMetricsServiceImpl{ + logger: logger, + lensClient: lensClient, + appRepository: appRepository, + pipelineRepository: pipelineRepository, + environmentRepository: environmentRepository, + cdWorkflowRepository: cdWorkflowRepository, + } +} + +func (impl *DoraMetricsServiceImpl) GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error) { + impl.logger.Infow("getting DORA metrics", "request", request) + + // Get all apps with production pipelines using optimized query for current period + appEnvPairs, err := impl.getAppEnvironmentPairsOptimized(ctx, request.TimeRangeRequest.From, request.TimeRangeRequest.To) + if err != nil { + impl.logger.Errorw("error getting app-environment pairs", "err", err) + return nil, err + } + + if len(appEnvPairs) == 0 { + impl.logger.Warnw("no production pipelines found with deployment history") + return bean.NewDoraMetricsResponse(), nil + } + + // Calculate all DORA metrics using a single bulk Lens API call covering every app-env pair + allMetrics, err := impl.calculateAllDoraMetricsFromLens(ctx, request, appEnvPairs) + if err != nil { + impl.logger.Errorw("error calculating DORA metrics from lens", "err", err) + return nil, err + } + + response := &bean.DoraMetricsResponse{ + ProdDeploymentPipelineCount: len(appEnvPairs), + DeploymentFrequency: allMetrics.DeploymentFrequency, + MeanLeadTime: allMetrics.MeanLeadTime, + ChangeFailureRate: allMetrics.ChangeFailureRate, + MeanTimeToRecovery: allMetrics.MeanTimeToRecovery, + } + + return response, nil +} + +// getAppEnvironmentPairsOptimized uses a single query to fetch production pipelines +// with deployment history within the specified time range. +// It only fetches the minimal data needed (AppId and EnvId) for better performance. +func (impl *DoraMetricsServiceImpl) getAppEnvironmentPairsOptimized(ctx context.Context, from, to *time.Time) ([]lens.AppEnvPair, error) { + prodPipelines, err := impl.pipelineRepository.FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error getting production pipelines with deployment history in time range", "from", from, "to", to, "err", err) + return nil, err + 
+	if len(prodPipelines) == 0 {
+		impl.logger.Warnw("no production pipelines found with deployment history in time range", "from", from, "to", to)
+		return []lens.AppEnvPair{}, nil
+	}
+
+	// Convert to our simplified structure, only keeping the IDs
+	var appEnvPairs []lens.AppEnvPair
+	for _, pipeline := range prodPipelines {
+		appEnvPairs = append(appEnvPairs, lens.AppEnvPair{
+			AppId: pipeline.AppId,
+			EnvId: pipeline.EnvironmentId,
+		})
+	}
+
+	return appEnvPairs, nil
+}
+
+// calculateAllDoraMetricsFromLens calculates all DORA metrics using a single bulk
+// Lens API call that covers every app-env pair at once
+func (impl *DoraMetricsServiceImpl) calculateAllDoraMetricsFromLens(ctx context.Context, request *bean.DoraMetricsRequest, appEnvPairs []lens.AppEnvPair) (*bean.AllDoraMetrics, error) {
+	currentMetricsData, err := impl.fetchAllMetricsFromLens(ctx, appEnvPairs, request.TimeRangeRequest.From, request.TimeRangeRequest.To)
+	if err != nil {
+		impl.logger.Errorw("error fetching current period metrics from lens", "err", err)
+		return nil, err
+	}
+
+	// Get app-env pairs for the previous period
+	previousAppEnvPairs, err := impl.getAppEnvironmentPairsOptimized(ctx, request.PrevFrom, request.PrevTo)
+	if err != nil {
+		impl.logger.Errorw("error getting app-environment pairs for previous period", "err", err)
+		// Continue without comparison if we can't get previous period data
+		return impl.createAllDoraMetricsWithoutComparison(currentMetricsData), nil
+	}
+
+	var previousMetricsData map[string]*bean.LensMetrics
+	if len(previousAppEnvPairs) > 0 {
+		previousMetricsData, err = impl.fetchAllMetricsFromLens(ctx, previousAppEnvPairs, request.PrevFrom, request.PrevTo)
+		if err != nil {
+			impl.logger.Errorw("error fetching previous period metrics from lens", "err", err)
+			// Continue without comparison if we can't get previous period data
+			return impl.createAllDoraMetricsWithoutComparison(currentMetricsData), nil
+		}
+	}
+
+	// Calculate all metrics with comparison
+	return impl.createAllDoraMetricsWithComparison(currentMetricsData, previousMetricsData), nil
+}
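+
+// The sketch below illustrates the bulk response shape this service expects from
+// Lens. It is inferred from how fetchAllMetricsFromLens unmarshals lensResp.Result,
+// not from a documented Lens contract; field names follow the lens.DoraMetrics
+// struct as consumed below, and the wire-level JSON keys may differ.
+//
+//	[
+//	  {
+//	    "appId": 12, "envId": 3,
+//	    "deploymentFrequency": 4.5,       // deployments per unit time
+//	    "meanLeadTimeForChanges": 120.0,  // minutes
+//	    "changeFailureRate": 6.25,        // percentage
+//	    "meanTimeToRecovery": 45.0        // minutes
+//	  }
+//	]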
+
+// fetchAllMetricsFromLens fetches all DORA metrics from Lens using a single bulk API call
+func (impl *DoraMetricsServiceImpl) fetchAllMetricsFromLens(ctx context.Context, bulkAppEnvPairs []lens.AppEnvPair, from, to *time.Time) (map[string]*bean.LensMetrics, error) {
+	metricsData := make(map[string]*bean.LensMetrics)
+
+	if len(bulkAppEnvPairs) == 0 {
+		return metricsData, nil
+	}
+
+	bulkRequest := &lens.BulkMetricRequest{
+		AppEnvPairs: bulkAppEnvPairs,
+		From:        from,
+		To:          to,
+	}
+
+	// Make a single bulk call to get all metrics for all app-env pairs
+	lensResp, resCode, err := impl.lensClient.GetBulkAppMetrics(bulkRequest)
+	if err != nil {
+		impl.logger.Errorw("error calling lens bulk API for all metrics", "err", err)
+		return nil, err
+	}
+
+	if !resCode.IsSuccess() {
+		impl.logger.Errorw("lens bulk API returned error", "statusCode", *resCode)
+		return nil, fmt.Errorf("lens bulk API returned error with status code: %d", *resCode)
+	}
+
+	// Parse the bulk response - it is directly an array of DoraMetrics
+	var doraMetricsArray []*lens.DoraMetrics
+	if err := json.Unmarshal(lensResp.Result, &doraMetricsArray); err != nil {
+		impl.logger.Errorw("error unmarshaling lens bulk response", "err", err)
+		return nil, err
+	}
+
+	// Process results and map them to app-env keys
+	for _, doraMetric := range doraMetricsArray {
+		if doraMetric == nil {
+			impl.logger.Warnw("nil dora metric in response")
+			continue
+		}
+
+		// Convert lens.DoraMetrics to bean.LensMetrics (our internal struct),
+		// mapping the new field names to the old structure for backward compatibility
+		lensMetrics := &bean.LensMetrics{
+			AverageCycleTime:    doraMetric.DeploymentFrequency,    // DeploymentFrequency maps to AverageCycleTime
+			AverageLeadTime:     doraMetric.MeanLeadTimeForChanges, // MeanLeadTimeForChanges maps to AverageLeadTime
+			ChangeFailureRate:   doraMetric.ChangeFailureRate,      // ChangeFailureRate maps directly
+			AverageRecoveryTime: doraMetric.MeanTimeToRecovery,     // MeanTimeToRecovery maps to AverageRecoveryTime
+		}
+
+		// Store metrics with app-env ID key
+		key := fmt.Sprintf("%d-%d", doraMetric.AppId, doraMetric.EnvId)
+		metricsData[key] = lensMetrics
+	}
+
+	return metricsData, nil
+}
+
+// createAllDoraMetricsWithoutComparison creates all DORA metrics without comparison data
+func (impl *DoraMetricsServiceImpl) createAllDoraMetricsWithoutComparison(currentMetricsData map[string]*bean.LensMetrics) *bean.AllDoraMetrics {
+	// Extract all metric values from current data
+	var deploymentFreqValues, leadTimeValues, changeFailureValues, recoveryTimeValues []float64
+
+	for _, metrics := range currentMetricsData {
+		deploymentFreqValues = append(deploymentFreqValues, metrics.AverageCycleTime)
+		leadTimeValues = append(leadTimeValues, metrics.AverageLeadTime)
+		changeFailureValues = append(changeFailureValues, metrics.ChangeFailureRate)
+		recoveryTimeValues = append(recoveryTimeValues, metrics.AverageRecoveryTime)
+	}
+
+	// Calculate averages
+	deploymentFreqAvg := util.CalculateAverageFromValues(deploymentFreqValues)
+	leadTimeAvg := util.CalculateAverageFromValues(leadTimeValues)
+	changeFailureAvg := util.CalculateAverageFromValues(changeFailureValues)
+	recoveryTimeAvg := util.CalculateAverageFromValues(recoveryTimeValues)
+
+	// Calculate performance levels for each metric separately
+	deploymentFreqPerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryDeploymentFrequency)
+	leadTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanLeadTime)
+	changeFailurePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryChangeFailureRate)
+	recoveryTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanTimeToRecovery)
+
+	deploymentFrequency := util.CreateDoraMetricObject(deploymentFreqAvg, bean.MetricValueUnitNumber, 0, bean.ComparisonUnitPercentage, deploymentFreqPerformanceLevels)
+	meanLeadTime := util.CreateDoraMetricObject(leadTimeAvg, bean.MetricValueUnitMinutes, 0, bean.ComparisonUnitMinutes, leadTimePerformanceLevels)
+	changeFailureRate := util.CreateDoraMetricObject(changeFailureAvg, bean.MetricValueUnitPercentage, 0, bean.ComparisonUnitPercentage, changeFailurePerformanceLevels)
+	meanTimeToRecovery := util.CreateDoraMetricObject(recoveryTimeAvg, bean.MetricValueUnitMinutes, 0, bean.ComparisonUnitMinutes, recoveryTimePerformanceLevels)
+
+	allDoraMetrics := bean.NewAllDoraMetrics().
+		WithDeploymentFrequency(deploymentFrequency).
+		WithMeanLeadTime(meanLeadTime).
+		WithChangeFailureRate(changeFailureRate).
+		WithMeanTimeToRecovery(meanTimeToRecovery)
+
+	return allDoraMetrics
+}
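+
+// A hedged worked example of the comparison semantics used below, assuming
+// util.CalculateComparison returns a signed delta between the two period
+// averages: with a current deployment-frequency average of 10 and a previous
+// average of 8, the comparison value would be reported alongside
+// ComparisonUnitPercentage (here, +25), while time-based metrics such as
+// meanLeadTime carry their delta with ComparisonUnitMinutes. The exact rounding
+// and direction conventions live in the util package, not here.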
+
+// createAllDoraMetricsWithComparison creates all DORA metrics with comparison data
+func (impl *DoraMetricsServiceImpl) createAllDoraMetricsWithComparison(currentMetricsData, previousMetricsData map[string]*bean.LensMetrics) *bean.AllDoraMetrics {
+	// Extract current period values
+	var currentDeploymentFreq, currentLeadTime, currentChangeFailure, currentRecoveryTime []float64
+	for _, metrics := range currentMetricsData {
+		currentDeploymentFreq = append(currentDeploymentFreq, metrics.AverageCycleTime)
+		currentLeadTime = append(currentLeadTime, metrics.AverageLeadTime)
+		currentChangeFailure = append(currentChangeFailure, metrics.ChangeFailureRate)
+		currentRecoveryTime = append(currentRecoveryTime, metrics.AverageRecoveryTime)
+	}
+
+	// Extract previous period values
+	var previousDeploymentFreq, previousLeadTime, previousChangeFailure, previousRecoveryTime []float64
+	for _, metrics := range previousMetricsData {
+		previousDeploymentFreq = append(previousDeploymentFreq, metrics.AverageCycleTime)
+		previousLeadTime = append(previousLeadTime, metrics.AverageLeadTime)
+		previousChangeFailure = append(previousChangeFailure, metrics.ChangeFailureRate)
+		previousRecoveryTime = append(previousRecoveryTime, metrics.AverageRecoveryTime)
+	}
+
+	// Calculate averages
+	currentDeploymentFreqAvg := util.CalculateAverageFromValues(currentDeploymentFreq)
+	currentLeadTimeAvg := util.CalculateAverageFromValues(currentLeadTime)
+	currentChangeFailureAvg := util.CalculateAverageFromValues(currentChangeFailure)
+	currentRecoveryTimeAvg := util.CalculateAverageFromValues(currentRecoveryTime)
+
+	previousDeploymentFreqAvg := util.CalculateAverageFromValues(previousDeploymentFreq)
+	previousLeadTimeAvg := util.CalculateAverageFromValues(previousLeadTime)
+	previousChangeFailureAvg := util.CalculateAverageFromValues(previousChangeFailure)
+	previousRecoveryTimeAvg := util.CalculateAverageFromValues(previousRecoveryTime)
+
+	// Calculate comparisons
+	deploymentFreqCompValue := util.CalculateComparison(currentDeploymentFreqAvg, previousDeploymentFreqAvg, bean.MetricCategoryDeploymentFrequency)
+	leadTimeCompValue := util.CalculateComparison(currentLeadTimeAvg, previousLeadTimeAvg, bean.MetricCategoryMeanLeadTime)
+	changeFailureCompValue := util.CalculateComparison(currentChangeFailureAvg, previousChangeFailureAvg, bean.MetricCategoryChangeFailureRate)
+	recoveryTimeCompValue := util.CalculateComparison(currentRecoveryTimeAvg, previousRecoveryTimeAvg, bean.MetricCategoryMeanTimeToRecovery)
+
+	// Calculate performance levels for each metric separately using current period data
+	deploymentFreqPerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryDeploymentFrequency)
+	leadTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanLeadTime)
+	changeFailurePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryChangeFailureRate)
+	recoveryTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanTimeToRecovery)
+
+	deploymentFrequency := util.CreateDoraMetricObject(currentDeploymentFreqAvg, bean.MetricValueUnitNumber, deploymentFreqCompValue, bean.ComparisonUnitPercentage, deploymentFreqPerformanceLevels)
+	meanLeadTime := util.CreateDoraMetricObject(currentLeadTimeAvg, bean.MetricValueUnitMinutes, leadTimeCompValue,
bean.ComparisonUnitMinutes, leadTimePerformanceLevels) + changeFailureRate := util.CreateDoraMetricObject(currentChangeFailureAvg, bean.MetricValueUnitPercentage, changeFailureCompValue, bean.ComparisonUnitPercentage, changeFailurePerformanceLevels) + meanTimeToRecovery := util.CreateDoraMetricObject(currentRecoveryTimeAvg, bean.MetricValueUnitMinutes, recoveryTimeCompValue, bean.ComparisonUnitMinutes, recoveryTimePerformanceLevels) + + allDoraMetrics := bean.NewAllDoraMetrics(). + WithDeploymentFrequency(deploymentFrequency). + WithMeanLeadTime(meanLeadTime). + WithChangeFailureRate(changeFailureRate). + WithMeanTimeToRecovery(meanTimeToRecovery) + + return allDoraMetrics +} diff --git a/pkg/overview/InsightsService.go b/pkg/overview/InsightsService.go new file mode 100644 index 0000000000..1fbf3422e0 --- /dev/null +++ b/pkg/overview/InsightsService.go @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package overview + +import ( + "context" + "fmt" + + "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "go.uber.org/zap" +) + +type InsightsService interface { + GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error) +} + +type InsightsServiceImpl struct { + logger *zap.SugaredLogger + appRepository app.AppRepository + pipelineRepository pipelineConfig.PipelineRepository + ciPipelineRepository pipelineConfig.CiPipelineRepository + ciWorkflowRepository pipelineConfig.CiWorkflowRepository + cdWorkflowRepository pipelineConfig.CdWorkflowRepository + environmentRepository repository.EnvironmentRepository +} + +func NewInsightsServiceImpl( + logger *zap.SugaredLogger, + appRepository app.AppRepository, + pipelineRepository pipelineConfig.PipelineRepository, + ciPipelineRepository pipelineConfig.CiPipelineRepository, + ciWorkflowRepository pipelineConfig.CiWorkflowRepository, + cdWorkflowRepository pipelineConfig.CdWorkflowRepository, + environmentRepository repository.EnvironmentRepository, +) *InsightsServiceImpl { + return &InsightsServiceImpl{ + logger: logger, + appRepository: appRepository, + pipelineRepository: pipelineRepository, + ciPipelineRepository: ciPipelineRepository, + ciWorkflowRepository: ciWorkflowRepository, + cdWorkflowRepository: cdWorkflowRepository, + environmentRepository: environmentRepository, + } +} + +func (impl *InsightsServiceImpl) GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error) { + var pipelines []bean.PipelineUsageItem + var totalCount int + var err error + + switch request.PipelineType { + case bean.BuildPipelines: + pipelines, totalCount, err = impl.getTriggeredBuildPipelines(ctx, request) + if err != nil { + impl.logger.Errorw("error getting triggered build pipelines", "err", err) + return nil, err + } + case bean.DeploymentPipelines: + pipelines, totalCount, err = impl.getTriggeredDeploymentPipelines(ctx, request) + if err != nil { + impl.logger.Errorw("error getting triggered deployment pipelines", "err", err) + return nil, err + } + default: + impl.logger.Errorw("invalid pipeline type", "pipelineType", request.PipelineType) + return nil, fmt.Errorf("invalid pipeline type: %s", request.PipelineType) + } + + response := &bean.InsightsResponse{ + Pipelines: pipelines, + TotalCount: totalCount, + } + + return response, nil +} + +func (impl 
*InsightsServiceImpl) getTriggeredBuildPipelines(ctx context.Context, request *bean.InsightsRequest) ([]bean.PipelineUsageItem, int, error) { + pipelineData, totalCount, err := impl.ciWorkflowRepository.GetTriggeredCIPipelines(request.TimeRangeRequest.From, request.TimeRangeRequest.To, request.SortOrder, request.Limit, request.Offset) + if err != nil { + impl.logger.Errorw("error getting triggered CI pipelines", "err", err) + return nil, 0, err + } + + var pipelineUsage []bean.PipelineUsageItem + for _, data := range pipelineData { + pipelineUsage = append(pipelineUsage, bean.PipelineUsageItem{ + AppID: data.AppID, + PipelineID: data.PipelineID, + PipelineName: data.PipelineName, + AppName: data.AppName, + TriggerCount: data.TriggerCount, + }) + } + + return pipelineUsage, totalCount, nil +} + +func (impl *InsightsServiceImpl) getTriggeredDeploymentPipelines(ctx context.Context, request *bean.InsightsRequest) ([]bean.PipelineUsageItem, int, error) { + pipelineData, totalCount, err := impl.cdWorkflowRepository.GetTriggeredCDPipelines(request.TimeRangeRequest.From, request.TimeRangeRequest.To, request.SortOrder, request.Limit, request.Offset) + if err != nil { + impl.logger.Errorw("error getting triggered CD pipelines", "err", err) + return nil, 0, err + } + + var pipelineUsage []bean.PipelineUsageItem + for _, data := range pipelineData { + pipelineUsage = append(pipelineUsage, bean.PipelineUsageItem{ + AppID: data.AppID, + EnvID: data.EnvID, + PipelineID: data.PipelineID, + PipelineName: data.PipelineName, + AppName: data.AppName, + EnvName: data.EnvName, + TriggerCount: data.TriggerCount, + }) + } + + return pipelineUsage, totalCount, nil +} + +// Approval policy coverage methods moved to ApprovalPolicyService diff --git a/pkg/overview/OverviewService.go b/pkg/overview/OverviewService.go new file mode 100644 index 0000000000..30bf4a09b9 --- /dev/null +++ b/pkg/overview/OverviewService.go @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package overview + +import ( + "context" + + "github.com/devtron-labs/common-lib/utils" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/cache" + "github.com/devtron-labs/devtron/pkg/overview/constants" +) + +type OverviewService interface { + // New Apps Overview + GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error) + + // New Workflow Overview + GetWorkflowOverview(ctx context.Context) (*bean.WorkflowOverviewResponse, error) + + // Build and Deployment Activity + GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error) + GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) (*bean.BuildDeploymentActivityDetailedResponse, error) + + // DORA Metrics + GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error) + + // Insights + GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error) + + // Cluster Management Overview + GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error) + DeleteClusterOverviewCache(ctx context.Context) error + RefreshClusterOverviewCache(ctx context.Context) error + + // Cluster Overview Detailed Drill-down API (unified endpoint for all node view group types) + GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) + + // Security Overview APIs + GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error) + GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error) + GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error) + GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error) + GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error) +} + +type OverviewServiceImpl struct { + appManagementService AppManagementService + doraMetricsService DoraMetricsService + insightsService InsightsService + clusterOverviewService ClusterOverviewService + clusterCacheService cache.ClusterCacheService + securityOverviewService SecurityOverviewService +} + +func NewOverviewServiceImpl( + appManagementService AppManagementService, + doraMetricsService DoraMetricsService, + insightsService InsightsService, + clusterOverviewService ClusterOverviewService, + clusterCacheService cache.ClusterCacheService, + securityOverviewService SecurityOverviewService, +) *OverviewServiceImpl { + return &OverviewServiceImpl{ + appManagementService: appManagementService, + doraMetricsService: doraMetricsService, + insightsService: insightsService, + clusterOverviewService: clusterOverviewService, + clusterCacheService: clusterCacheService, + securityOverviewService: securityOverviewService, + } +} + +func (impl *OverviewServiceImpl) GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error) { + return impl.appManagementService.GetAppsOverview(ctx) +} + +func (impl *OverviewServiceImpl) GetWorkflowOverview(ctx 
context.Context) (*bean.WorkflowOverviewResponse, error) { + return impl.appManagementService.GetWorkflowOverview(ctx) +} + +func (impl *OverviewServiceImpl) GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error) { + return impl.appManagementService.GetBuildDeploymentActivity(ctx, request) +} + +func (impl *OverviewServiceImpl) GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) (*bean.BuildDeploymentActivityDetailedResponse, error) { + return impl.appManagementService.GetBuildDeploymentActivityDetailed(ctx, request) +} + +func (impl *OverviewServiceImpl) GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error) { + return impl.doraMetricsService.GetDoraMetrics(ctx, request) +} + +func (impl *OverviewServiceImpl) GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error) { + return impl.insightsService.GetInsights(ctx, request) +} + +func (impl *OverviewServiceImpl) GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error) { + return impl.clusterOverviewService.GetClusterOverview(ctx) +} + +func (impl *OverviewServiceImpl) DeleteClusterOverviewCache(ctx context.Context) error { + impl.clusterCacheService.InvalidateClusterOverview() + return nil +} + +func (impl *OverviewServiceImpl) RefreshClusterOverviewCache(ctx context.Context) error { + return impl.clusterOverviewService.RefreshClusterOverviewCache(ctx) +} + +func (impl *OverviewServiceImpl) GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + return impl.clusterOverviewService.GetClusterOverviewDetailedNodeInfo(ctx, request) +} + +// ============================================================================ +// Security Overview APIs +// ============================================================================ + +func (impl *OverviewServiceImpl) GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error) { + return impl.securityOverviewService.GetSecurityOverview(ctx, request) +} + +func (impl *OverviewServiceImpl) GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error) { + return impl.securityOverviewService.GetSeverityInsights(ctx, request) +} + +func (impl *OverviewServiceImpl) GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error) { + return impl.securityOverviewService.GetDeploymentSecurityStatus(ctx, request) +} + +func (impl *OverviewServiceImpl) GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error) { + return impl.securityOverviewService.GetVulnerabilityTrend(ctx, currentTimeRange, envType, aggregationType) +} + +func (impl *OverviewServiceImpl) GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error) { + return impl.securityOverviewService.GetBlockedDeploymentsTrend(ctx, currentTimeRange, aggregationType) +} diff --git a/pkg/overview/SecurityOverviewService.go b/pkg/overview/SecurityOverviewService.go new file 
mode 100644
index 0000000000..226e63c12a
--- /dev/null
+++ b/pkg/overview/SecurityOverviewService.go
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2024. Devtron Inc.
+ */
+
+package overview
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/devtron-labs/common-lib/utils"
+	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig"
+	"github.com/devtron-labs/devtron/pkg/overview/adaptor"
+	"github.com/devtron-labs/devtron/pkg/overview/bean"
+	"github.com/devtron-labs/devtron/pkg/overview/constants"
+	"github.com/devtron-labs/devtron/pkg/overview/util"
+	imageScanRepo "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository"
+	scanBean "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean"
+	"go.uber.org/zap"
+)
+
+type SecurityOverviewService interface {
+	// 1. Security Overview API - "At a Glance" metrics (organization-wide)
+	GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error)
+
+	// 2. Severity Insights API - With prod/non-prod filtering
+	GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error)
+
+	// 3. Deployment Security Status API
+	GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error)
+
+	// 5. Vulnerability Trend API - Time-series with prod/non-prod filtering
+	GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error)
+
+	// 6. Blocked Deployments Trend API - Organization-wide
+	GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error)
+}
+
+type SecurityOverviewServiceImpl struct {
+	logger                        *zap.SugaredLogger
+	imageScanResultRepository     imageScanRepo.ImageScanResultRepository
+	imageScanDeployInfoRepository imageScanRepo.ImageScanDeployInfoRepository
+	cveStoreRepository            imageScanRepo.CveStoreRepository
+	ciPipelineRepository          pipelineConfig.CiPipelineRepository
+	cdWorkflowRepository          pipelineConfig.CdWorkflowRepository
+}
+
+func NewSecurityOverviewServiceImpl(
+	logger *zap.SugaredLogger,
+	imageScanResultRepository imageScanRepo.ImageScanResultRepository,
+	imageScanDeployInfoRepository imageScanRepo.ImageScanDeployInfoRepository,
+	cveStoreRepository imageScanRepo.CveStoreRepository,
+	ciPipelineRepository pipelineConfig.CiPipelineRepository,
+	cdWorkflowRepository pipelineConfig.CdWorkflowRepository,
+) *SecurityOverviewServiceImpl {
+	return &SecurityOverviewServiceImpl{
+		logger:                        logger,
+		imageScanResultRepository:     imageScanResultRepository,
+		imageScanDeployInfoRepository: imageScanDeployInfoRepository,
+		cveStoreRepository:            cveStoreRepository,
+		ciPipelineRepository:          ciPipelineRepository,
+		cdWorkflowRepository:          cdWorkflowRepository,
+	}
+}
+
+func (service *SecurityOverviewServiceImpl) GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error) {
+	service.logger.Infow("GetSecurityOverview called", "request", request)
+
+	// Fetch all vulnerabilities with fixed_version in a single query
+	vulnerabilities, err := service.imageScanResultRepository.GetVulnerabilitiesWithFixedVersionByFilters(request.EnvIds, request.ClusterIds, request.AppIds)
+	if err != nil {
+		service.logger.Errorw("error fetching vulnerabilities", "err", err)
+		return nil, fmt.Errorf("failed to fetch vulnerabilities: %w", err)
+	}
+
+	// Calculate counts in application code
+	totalCount := len(vulnerabilities)
+	fixableCount := 0
+	zeroDayCount := 0
+
+	uniqueCVEs := make(map[string]bool)
+	uniqueFixableCVEs := make(map[string]bool)
+	uniqueZeroDayCVEs := make(map[string]bool)
+
+	for _, vuln := range vulnerabilities {
+		// Track unique CVEs
+		uniqueCVEs[vuln.CveStoreName] = true
+
+		// Check if fixable (has fixed_version)
+		if vuln.FixedVersion != "" {
+			fixableCount++
+			uniqueFixableCVEs[vuln.CveStoreName] = true
+		} else {
+			// Zero-day (no fixed_version)
+			zeroDayCount++
+			uniqueZeroDayCVEs[vuln.CveStoreName] = true
+		}
+	}
+
+	response := &bean.SecurityOverviewResponse{
+		TotalVulnerabilities: &bean.VulnerabilityCount{
+			Count:       totalCount,
+			UniqueCount: len(uniqueCVEs),
+		},
+		FixableVulnerabilities: &bean.VulnerabilityCount{
+			Count:       fixableCount,
+			UniqueCount: len(uniqueFixableCVEs),
+		},
+		ZeroDayVulnerabilities: &bean.VulnerabilityCount{
+			Count:       zeroDayCount,
+			UniqueCount: len(uniqueZeroDayCVEs),
+		},
+	}
+
+	return response, nil
+}
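+
+// Worked example of the counting above (illustrative numbers only): given five
+// result rows where three carry a FixedVersion and the remaining two share the
+// CVE name "CVE-2024-0001", the response would report
+// TotalVulnerabilities{Count: 5, UniqueCount: 4},
+// FixableVulnerabilities{Count: 3, UniqueCount: 3} and
+// ZeroDayVulnerabilities{Count: 2, UniqueCount: 1}. Count is per scan-result
+// row, while UniqueCount deduplicates by CVE name across rows.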
+
+func (service *SecurityOverviewServiceImpl) GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error) {
+	service.logger.Infow("GetSeverityInsights called", "request", request)
+
+	// Determine environment type filter:
+	// nil = all environments, true = prod only, false = non-prod only
+	var isProd *bool
+	if request.EnvType == bean.EnvTypeProd {
+		prodValue := true
+		isProd = &prodValue
+	} else if request.EnvType == bean.EnvTypeNonProd {
+		nonProdValue := false
+		isProd = &nonProdValue
+	}
+	// If EnvType is "all", isProd remains nil
+
+	// Fetch all vulnerability data with severity and execution time in a single query
+	vulnerabilities, err := service.imageScanResultRepository.GetSeverityInsightDataByFilters(request.EnvIds, request.ClusterIds, request.AppIds, isProd)
+	if err != nil {
+		service.logger.Errorw("error fetching severity insight data", "err", err)
+		return nil, fmt.Errorf("failed to fetch severity insight data: %w", err)
+	}
+
+	// Initialize counters using the adaptor factories
+	severityCount := adaptor.NewSeverityCount()
+	ageDistribution := adaptor.NewAgeDistribution()
+
+	// Current time for age calculation
+	now := time.Now()
+
+	// Process vulnerabilities in a single pass
+	for _, vuln := range vulnerabilities {
+		severity := scanBean.Severity(vuln.Severity)
+
+		// Count by severity
+		switch severity {
+		case scanBean.Critical:
+			severityCount.Critical++
+		case scanBean.High:
+			severityCount.High++
+		case scanBean.Medium:
+			severityCount.Medium++
+		case scanBean.Low:
+			severityCount.Low++
+		default:
+			severityCount.Unknown++
+		}
+
+		// Calculate age in days
+		age := now.Sub(vuln.ExecutionTime).Hours() / 24
+
+		// Count by age bucket AND severity
+		var ageBucket *bean.AgeBucketSeverity
+		if age < 30 {
+			ageBucket = ageDistribution.LessThan30Days
+		} else if age < 60 {
+			ageBucket = ageDistribution.Between30To60Days
+		} else if age < 90 {
+			ageBucket = ageDistribution.Between60To90Days
+		} else {
+			ageBucket = ageDistribution.MoreThan90Days
+		}
+
+		// Increment the severity count within the age bucket
+		switch severity {
+		case scanBean.Critical:
+			ageBucket.Critical++
+		case scanBean.High:
+			ageBucket.High++
+		case scanBean.Medium:
+			ageBucket.Medium++
+		case scanBean.Low:
+			ageBucket.Low++
+		default:
+			ageBucket.Unknown++
+		}
+	}
+
+	response :=
&bean.SeverityInsightsResponse{ + SeverityDistribution: severityCount, + AgeDistribution: ageDistribution, + } + + return response, nil +} + +func (service *SecurityOverviewServiceImpl) GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error) { + service.logger.Infow("GetDeploymentSecurityStatus called", "request", request) + + // Get total active deployments count + totalDeployments, err := service.imageScanDeployInfoRepository.GetActiveDeploymentCountByFilters(request.EnvIds, request.ClusterIds, request.AppIds) + if err != nil { + service.logger.Errorw("error getting total active deployments count", "err", err) + return nil, fmt.Errorf("failed to get total active deployments count: %w", err) + } + + // Get deployments with vulnerabilities count + deploymentsWithVulnerabilities, err := service.imageScanDeployInfoRepository.GetActiveDeploymentCountWithVulnerabilitiesByFilters(request.EnvIds, request.ClusterIds, request.AppIds) + if err != nil { + service.logger.Errorw("error getting deployments with vulnerabilities count", "err", err) + return nil, fmt.Errorf("failed to get deployments with vulnerabilities count: %w", err) + } + + // Get scanned and unscanned deployment counts in a single optimized query + scannedCounts, err := service.imageScanDeployInfoRepository.GetActiveDeploymentScannedUnscannedCountByFilters(request.EnvIds, request.ClusterIds, request.AppIds) + if err != nil { + service.logger.Errorw("error getting scanned/unscanned deployment counts", "err", err) + return nil, fmt.Errorf("failed to get scanned/unscanned deployment counts: %w", err) + } + + // Get total CI pipelines count (workflows) + totalCiPipelines, err := service.ciPipelineRepository.GetActiveCiPipelineCount() + if err != nil { + service.logger.Errorw("error getting total CI pipelines count", "err", err) + return nil, fmt.Errorf("failed to get total CI pipelines count: %w", err) + } + + // Get scan-enabled CI pipelines count (scan_enabled=true in ci_pipeline table) + scanEnabledCiPipelines, err := service.ciPipelineRepository.GetScanEnabledCiPipelineCount() + if err != nil { + service.logger.Errorw("error getting scan-enabled CI pipelines count", "err", err) + return nil, fmt.Errorf("failed to get scan-enabled CI pipelines count: %w", err) + } + + // Get CI pipelines with IMAGE SCAN plugin configured in POST-CI or PRE-CD stages + pluginConfiguredPipelines, err := service.ciPipelineRepository.GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd() + if err != nil { + service.logger.Errorw("error getting CI pipelines with IMAGE SCAN plugin in POST-CI or PRE-CD count", "err", err) + return nil, fmt.Errorf("failed to get CI pipelines with IMAGE SCAN plugin in POST-CI or PRE-CD count: %w", err) + } + + totalScanningEnabledPipelines := scanEnabledCiPipelines + pluginConfiguredPipelines + + // Build response with calculated percentages + // For unscanned images: percentage = unscanned / (unscanned + scanned) + totalScannableDeployments := scannedCounts.UnscannedCount + scannedCounts.ScannedCount + response := &bean.DeploymentSecurityStatusResponse{ + ActiveDeploymentsWithVulnerabilities: &bean.DeploymentMetric{ + Count: deploymentsWithVulnerabilities, + Percentage: calculatePercentage(deploymentsWithVulnerabilities, totalDeployments), + }, + ActiveDeploymentsWithUnscannedImages: &bean.DeploymentMetric{ + Count: scannedCounts.UnscannedCount, + Percentage: calculatePercentage(scannedCounts.UnscannedCount, totalScannableDeployments), + 
+		},
+		WorkflowsWithScanningEnabled: &bean.WorkflowMetric{
+			Count:      totalScanningEnabledPipelines,
+			Percentage: calculatePercentage(totalScanningEnabledPipelines, totalCiPipelines),
+		},
+	}
+
+	return response, nil
+}
+
+func (service *SecurityOverviewServiceImpl) GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error) {
+	service.logger.Infow("GetVulnerabilityTrend called", "from", currentTimeRange.From, "to", currentTimeRange.To, "envType", envType, "aggregationType", aggregationType)
+
+	// Determine environment type filter:
+	// nil = all environments, true = prod only, false = non-prod only
+	var isProd *bool
+	if envType == bean.EnvTypeProd {
+		prodValue := true
+		isProd = &prodValue
+	} else if envType == bean.EnvTypeNonProd {
+		nonProdValue := false
+		isProd = &nonProdValue
+	}
+	// If envType is "all", isProd remains nil
+
+	// Fetch vulnerability trend data from repository
+	vulnerabilities, err := service.imageScanResultRepository.GetVulnerabilityTrendDataByFilters(
+		currentTimeRange.From,
+		currentTimeRange.To,
+		isProd,
+	)
+	if err != nil {
+		service.logger.Errorw("error getting vulnerability trend data", "err", err)
+		return nil, fmt.Errorf("failed to get vulnerability trend data: %w", err)
+	}
+
+	// Aggregate vulnerabilities by time bucket and severity
+	trendData := service.aggregateVulnerabilitiesByTime(vulnerabilities, currentTimeRange.From, currentTimeRange.To, aggregationType)
+
+	response := &bean.VulnerabilityTrendResponse{
+		Trend: trendData,
+	}
+
+	return response, nil
+}
+
+// aggregateVulnerabilitiesByTime aggregates vulnerabilities by time buckets and severity
+func (service *SecurityOverviewServiceImpl) aggregateVulnerabilitiesByTime(
+	vulnerabilities []*imageScanRepo.VulnerabilityTrendData,
+	from, to *time.Time,
+	aggregationType constants.AggregationType,
+) []*bean.VulnerabilityTrendDataPoint {
+	// Map to track unique CVEs per time bucket and severity: timeKey -> severity -> set of CVE names
+	severityMap := make(map[string]map[int]map[string]bool)
+
+	targetLocation := from.Location()
+
+	// Process each vulnerability and bucket by time
+	for _, vuln := range vulnerabilities {
+		// Convert the UTC execution time to the target timezone for proper time bucketing
+		localExecutionTime := vuln.ExecutionTime.In(targetLocation)
+
+		// Build bucket keys with time.Date in the target location rather than
+		// time.Truncate: Truncate rounds on absolute time since the zero instant,
+		// so for locations with a non-zero UTC offset it produces hour/day keys
+		// misaligned with the bucket keys generated by the loops below.
+		var timeKey string
+		if aggregationType == constants.AggregateByHour {
+			timeKey = time.Date(localExecutionTime.Year(), localExecutionTime.Month(), localExecutionTime.Day(), localExecutionTime.Hour(), 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z")
+		} else if aggregationType == constants.AggregateByMonth {
+			timeKey = time.Date(localExecutionTime.Year(), localExecutionTime.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z")
+		} else {
+			timeKey = time.Date(localExecutionTime.Year(), localExecutionTime.Month(), localExecutionTime.Day(), 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z")
+		}
+
+		// Initialize maps if needed
+		if severityMap[timeKey] == nil {
+			severityMap[timeKey] = make(map[int]map[string]bool)
+		}
+		if severityMap[timeKey][vuln.Severity] == nil {
+			severityMap[timeKey][vuln.Severity] = make(map[string]bool)
+		}
+
+		// Track unique CVE names per time bucket and severity
+		severityMap[timeKey][vuln.Severity][vuln.CveStoreName] = true
+	}
+
+	// Generate time-series data with zero values for missing time buckets
+	var trendData []*bean.VulnerabilityTrendDataPoint
+
+	if aggregationType == constants.AggregateByHour {
+		current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location())
+		end :=
time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + dataPoint := service.createVulnerabilityDataPoint(current, severityMap[timeKey]) + trendData = append(trendData, dataPoint) + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + dataPoint := service.createVulnerabilityDataPoint(current, severityMap[timeKey]) + trendData = append(trendData, dataPoint) + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Daily aggregation + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + dataPoint := service.createVulnerabilityDataPoint(current, severityMap[timeKey]) + trendData = append(trendData, dataPoint) + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData +} + +// createVulnerabilityDataPoint creates a data point with counts for each severity level +func (service *SecurityOverviewServiceImpl) createVulnerabilityDataPoint( + timestamp time.Time, + severityCounts map[int]map[string]bool, +) *bean.VulnerabilityTrendDataPoint { + dataPoint := &bean.VulnerabilityTrendDataPoint{ + Timestamp: timestamp, + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + Total: 0, + } + + if severityCounts == nil { + return dataPoint + } + + // Count unique CVEs for each severity level + for severity, cveSet := range severityCounts { + count := len(cveSet) + + switch scanBean.Severity(severity) { + case scanBean.Critical: + dataPoint.Critical = count + case scanBean.High: + dataPoint.High = count + case scanBean.Medium: + dataPoint.Medium = count + case scanBean.Low: + dataPoint.Low = count + default: + dataPoint.Unknown = count + } + + dataPoint.Total += count + } + + return dataPoint +} + +func (service *SecurityOverviewServiceImpl) GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error) { + service.logger.Infow("GetBlockedDeploymentsTrend called", "from", currentTimeRange.From, "to", currentTimeRange.To, "aggregationType", aggregationType) + + // Fetch blocked deployment data from repository + blockedDeployments, err := service.cdWorkflowRepository.GetBlockedDeploymentsForTrend(currentTimeRange.From, currentTimeRange.To) + if err != nil { + service.logger.Errorw("error getting blocked deployments for trend", "err", err) + return nil, fmt.Errorf("failed to get blocked deployments: %w", err) + } + + // Aggregate blocked deployments by time bucket + trendData := service.aggregateBlockedDeploymentsByTime(blockedDeployments, currentTimeRange.From, currentTimeRange.To, aggregationType) + + response := &bean.BlockedDeploymentsTrendResponse{ + Trend: trendData, + } + + return response, nil +} + +// aggregateBlockedDeploymentsByTime aggregates blocked deployments by time buckets +func (service *SecurityOverviewServiceImpl) aggregateBlockedDeploymentsByTime( + blockedDeployments 
[]pipelineConfig.BlockedDeploymentData, + from, to *time.Time, + aggregationType constants.AggregationType, +) []*bean.BlockedDeploymentDataPoint { + // Map to track counts per time bucket: Unix timestamp -> count + countMap := make(map[int64]int) + + targetLocation := from.Location() + + // Process each blocked deployment and bucket by time + for _, deployment := range blockedDeployments { + // Convert UTC started_on time to target timezone for proper time bucketing + localStartedOn := deployment.StartedOn.In(targetLocation) + + var bucketTime time.Time + if aggregationType == constants.AggregateByHour { + // Truncate to hour boundary in local timezone + bucketTime = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), + localStartedOn.Hour(), 0, 0, 0, targetLocation) + } else if aggregationType == constants.AggregateByMonth { + // Truncate to month boundary (1st day of month at midnight) + bucketTime = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation) + } else { + // Daily aggregation - truncate to day boundary (midnight) in local timezone + bucketTime = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), + 0, 0, 0, 0, targetLocation) + } + + // Use Unix timestamp as key to avoid timezone formatting issues + timeKey := bucketTime.Unix() + countMap[timeKey]++ + } + + // Generate time-series data with zero values for missing time buckets + var trendData []*bean.BlockedDeploymentDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Unix() + count := countMap[timeKey] + + trendData = append(trendData, &bean.BlockedDeploymentDataPoint{ + Timestamp: current, + Count: count, + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Unix() + count := countMap[timeKey] + + trendData = append(trendData, &bean.BlockedDeploymentDataPoint{ + Timestamp: current, + Count: count, + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Daily aggregation + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Unix() + count := countMap[timeKey] + + trendData = append(trendData, &bean.BlockedDeploymentDataPoint{ + Timestamp: current, + Count: count, + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData +} + +func calculatePercentage(count, total int) float64 { + if total == 0 { + return 0.0 + } + return util.RoundToTwoDecimals(float64(count) / float64(total) * 100.0) +} diff --git a/pkg/overview/adaptor/SecurityOverviewAdapter.go b/pkg/overview/adaptor/SecurityOverviewAdapter.go new file mode 100644 index 0000000000..b983e592f9 --- /dev/null +++ b/pkg/overview/adaptor/SecurityOverviewAdapter.go @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */
+
+package adaptor
+
+import "github.com/devtron-labs/devtron/pkg/overview/bean"
+
+// SecurityOverviewAdapter provides factory methods for initializing security overview bean structs
+
+// NewSeverityCount returns a new initialized SeverityCount with all fields set to zero
+func NewSeverityCount() *bean.SeverityCount {
+	return &bean.SeverityCount{
+		Critical: 0,
+		High:     0,
+		Medium:   0,
+		Low:      0,
+		Unknown:  0,
+	}
+}
+
+// NewAgeBucketSeverity returns a new initialized AgeBucketSeverity with all fields set to zero
+func NewAgeBucketSeverity() *bean.AgeBucketSeverity {
+	return &bean.AgeBucketSeverity{
+		Critical: 0,
+		High:     0,
+		Medium:   0,
+		Low:      0,
+		Unknown:  0,
+	}
+}
+
+// NewAgeDistribution returns a new initialized AgeDistribution with all nested structs initialized
+func NewAgeDistribution() *bean.AgeDistribution {
+	return &bean.AgeDistribution{
+		LessThan30Days:    NewAgeBucketSeverity(),
+		Between30To60Days: NewAgeBucketSeverity(),
+		Between60To90Days: NewAgeBucketSeverity(),
+		MoreThan90Days:    NewAgeBucketSeverity(),
+	}
+}
+
+// NewVulnerabilityCount returns a new initialized VulnerabilityCount with all fields set to zero
+func NewVulnerabilityCount() *bean.VulnerabilityCount {
+	return &bean.VulnerabilityCount{
+		Count:       0,
+		UniqueCount: 0,
+	}
+}
+
+// NewSecurityOverviewResponse returns a new initialized SecurityOverviewResponse with all nested structs initialized
+func NewSecurityOverviewResponse() *bean.SecurityOverviewResponse {
+	return &bean.SecurityOverviewResponse{
+		TotalVulnerabilities:   NewVulnerabilityCount(),
+		FixableVulnerabilities: NewVulnerabilityCount(),
+		ZeroDayVulnerabilities: NewVulnerabilityCount(),
+	}
+}
+
+// NewSeverityInsightsResponse returns a new initialized SeverityInsightsResponse with all nested structs initialized
+func NewSeverityInsightsResponse() *bean.SeverityInsightsResponse {
+	return &bean.SeverityInsightsResponse{
+		SeverityDistribution: NewSeverityCount(),
+		AgeDistribution:      NewAgeDistribution(),
+	}
+}
+
+// NewDeploymentMetric returns a new initialized DeploymentMetric with all fields set to zero
+func NewDeploymentMetric() *bean.DeploymentMetric {
+	return &bean.DeploymentMetric{
+		Count:      0,
+		Percentage: 0.0,
+	}
+}
+
+// NewWorkflowMetric returns a new initialized WorkflowMetric with all fields set to zero
+func NewWorkflowMetric() *bean.WorkflowMetric {
+	return &bean.WorkflowMetric{
+		Count:      0,
+		Percentage: 0.0,
+	}
+}
+
+// NewDeploymentSecurityStatusResponse returns a new initialized DeploymentSecurityStatusResponse with all nested structs initialized
+func NewDeploymentSecurityStatusResponse() *bean.DeploymentSecurityStatusResponse {
+	return &bean.DeploymentSecurityStatusResponse{
+		ActiveDeploymentsWithVulnerabilities: NewDeploymentMetric(),
+		ActiveDeploymentsWithUnscannedImages: NewDeploymentMetric(),
+		WorkflowsWithScanningEnabled:         NewWorkflowMetric(),
+	}
+}
+
+// NewVulnerabilitiesResponse returns a new initialized VulnerabilitiesResponse with an empty slice and pagination info
+func NewVulnerabilitiesResponse(offset, size int) *bean.VulnerabilitiesResponse {
+	return &bean.VulnerabilitiesResponse{
+		Vulnerabilities: []*bean.Vulnerability{},
+		Total:           0,
+		Offset:          offset,
+		Size:            size,
+	}
+}
+
+// NewVulnerabilityTrendResponse returns a new initialized VulnerabilityTrendResponse with an empty trend slice
+func NewVulnerabilityTrendResponse() *bean.VulnerabilityTrendResponse {
+	return &bean.VulnerabilityTrendResponse{
+		Trend: []*bean.VulnerabilityTrendDataPoint{},
+	}
+}
+
+// NewBlockedDeploymentsTrendResponse returns a new initialized BlockedDeploymentsTrendResponse with an empty trend slice
+func NewBlockedDeploymentsTrendResponse() *bean.BlockedDeploymentsTrendResponse {
+	return &bean.BlockedDeploymentsTrendResponse{
+		Trend: []*bean.BlockedDeploymentDataPoint{},
+	}
+}
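+
+// Usage note (a sketch, not part of this change): one practical effect of these
+// factories is that pointer-typed nested fields serialize as zeroed objects
+// instead of JSON null. For example:
+//
+//	resp := adaptor.NewSeverityInsightsResponse()
+//	// resp.SeverityDistribution and resp.AgeDistribution are non-nil here, so
+//	// encoding/json emits zero-valued objects rather than null (exact key names
+//	// depend on the json tags declared on the bean structs).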
diff --git a/pkg/overview/bean/DoraMetricBean.go b/pkg/overview/bean/DoraMetricBean.go
new file mode 100644
index 0000000000..9ad30fdb03
--- /dev/null
+++ b/pkg/overview/bean/DoraMetricBean.go
@@ -0,0 +1,172 @@
+package bean
+
+import (
+	"time"
+
+	"github.com/devtron-labs/common-lib/utils"
+)
+
+type AllDoraMetrics struct {
+	DeploymentFrequency *DoraMetric
+	MeanLeadTime        *DoraMetric
+	ChangeFailureRate   *DoraMetric
+	MeanTimeToRecovery  *DoraMetric
+}
+
+func NewAllDoraMetrics() *AllDoraMetrics {
+	return &AllDoraMetrics{
+		DeploymentFrequency: &DoraMetric{},
+		MeanLeadTime:        &DoraMetric{},
+		ChangeFailureRate:   &DoraMetric{},
+		MeanTimeToRecovery:  &DoraMetric{},
+	}
+}
+
+func (r *AllDoraMetrics) WithDeploymentFrequency(deploymentFrequency *DoraMetric) *AllDoraMetrics {
+	r.DeploymentFrequency = deploymentFrequency
+	return r
+}
+
+func (r *AllDoraMetrics) WithMeanLeadTime(meanLeadTime *DoraMetric) *AllDoraMetrics {
+	r.MeanLeadTime = meanLeadTime
+	return r
+}
+
+func (r *AllDoraMetrics) WithChangeFailureRate(changeFailureRate *DoraMetric) *AllDoraMetrics {
+	r.ChangeFailureRate = changeFailureRate
+	return r
+}
+
+func (r *AllDoraMetrics) WithMeanTimeToRecovery(meanTimeToRecovery *DoraMetric) *AllDoraMetrics {
+	r.MeanTimeToRecovery = meanTimeToRecovery
+	return r
+}
+
+// LensMetrics represents the response structure from the Lens API
+type LensMetrics struct {
+	AverageCycleTime       float64 `json:"average_cycle_time"`
+	AverageLeadTime        float64 `json:"average_lead_time"`
+	ChangeFailureRate      float64 `json:"change_failure_rate"`
+	AverageRecoveryTime    float64 `json:"average_recovery_time"`
+	AverageDeploymentSize  float32 `json:"average_deployment_size"`
+	AverageLineAdded       float32 `json:"average_line_added"`
+	AverageLineDeleted     float32 `json:"average_line_deleted"`
+	LastFailedTime         string  `json:"last_failed_time"`
+	RecoveryTimeLastFailed float64 `json:"recovery_time_last_failed"`
+}
+
+type DoraMetric struct {
+	OverallAverage        *MetricValue           `json:"overallAverage"`
+	ComparisonValue       int                    `json:"comparisonValue"`       // Percentage or minutes change
+	ComparisonUnit        ComparisonUnit         `json:"comparisonUnit"`        // PERCENTAGE or MINUTES
+	PerformanceLevelCount *PerformanceLevelCount `json:"performanceLevelCount"` // Count of pipelines in each performance category
+}
+
+func NewDoraMetric() *DoraMetric {
+	return &DoraMetric{}
+}
+
+func (r *DoraMetric) WithOverallAverage(overallAverage *MetricValue) *DoraMetric {
+	r.OverallAverage = overallAverage
+	return r
+}
+
+func (r *DoraMetric) WithComparisonValue(comparisonValue int) *DoraMetric {
+	r.ComparisonValue = comparisonValue
+	return r
+}
+
+func (r *DoraMetric) WithComparisonUnit(comparisonUnit ComparisonUnit) *DoraMetric {
+	r.ComparisonUnit = comparisonUnit
+	return r
+}
+
+func (r *DoraMetric) WithPerformanceLevelCount(performanceLevelCount *PerformanceLevelCount) *DoraMetric {
+	r.PerformanceLevelCount = performanceLevelCount
+	return r
+}
+
+type MetricValue struct {
+	Value float64 `json:"value"`
+	Unit  string  `json:"unit"` // NUMBER, PERCENTAGE, MINUTES
+}
+
+type PerformanceLevelCount struct {
+	Elite  int `json:"elite"`
+	High   int `json:"high"`
+	Medium int `json:"medium"`
+	Low    int `json:"low"`
+}
+
+// DORA Metrics Beans
+type DoraMetricsRequest struct {
+	TimeRangeRequest *utils.TimeRangeRequest
`json:"timeRangeRequest"` + PrevFrom *time.Time `json:"prevFrom,omitempty"` // Previous period start time + PrevTo *time.Time `json:"prevTo,omitempty"` // Previous period end time +} + +type DoraMetricsResponse struct { + ProdDeploymentPipelineCount int `json:"prodDeploymentPipelineCount"` + DeploymentFrequency *DoraMetric `json:"deploymentFrequency"` + MeanLeadTime *DoraMetric `json:"meanLeadTime"` + ChangeFailureRate *DoraMetric `json:"changeFailureRate"` + MeanTimeToRecovery *DoraMetric `json:"meanTimeToRecovery"` +} + +func NewDoraMetricsResponse() *DoraMetricsResponse { + return &DoraMetricsResponse{} +} + +type ComparisonUnit string + +const ( + ComparisonUnitMinutes ComparisonUnit = "MINUTES" + ComparisonUnitPercentage ComparisonUnit = "PERCENTAGE" +) + +type MetricValueUnit string + +const ( + MetricValueUnitNumber MetricValueUnit = "NUMBER" + MetricValueUnitPercentage MetricValueUnit = "PERCENTAGE" + MetricValueUnitMinutes MetricValueUnit = "MINUTES" +) + +func (r MetricValueUnit) ToString() string { + return string(r) +} + +type PerformanceCategory string + +const ( + PerformanceElite PerformanceCategory = "Elite" + PerformanceHigh PerformanceCategory = "High" + PerformanceMedium PerformanceCategory = "Medium" + PerformanceLow PerformanceCategory = "Low" +) + +type MetricCategory string + +const ( + MetricCategoryMeanTimeToRecovery MetricCategory = "meanTimeToRecovery" + MetricCategoryChangeFailureRate MetricCategory = "changeFailureRate" + MetricCategoryMeanLeadTime MetricCategory = "meanLeadTime" + MetricCategoryDeploymentFrequency MetricCategory = "deploymentFrequency" +) + +// IsValidMetricCategory checks if the given string is a valid metric category +func IsValidMetricCategory(category string) bool { + switch MetricCategory(category) { + case MetricCategoryDeploymentFrequency, MetricCategoryMeanLeadTime, MetricCategoryChangeFailureRate, MetricCategoryMeanTimeToRecovery: + return true + default: + return false + } +} + +// IsValidPerformanceCategory checks if the given string is a valid performance category +func IsValidPerformanceCategory(category string) bool { + switch PerformanceCategory(category) { + case PerformanceElite, PerformanceHigh, PerformanceMedium, PerformanceLow: + return true + default: + return false + } +} diff --git a/pkg/overview/bean/OverviewBean.go b/pkg/overview/bean/OverviewBean.go new file mode 100644 index 0000000000..11eb4a058a --- /dev/null +++ b/pkg/overview/bean/OverviewBean.go @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package bean + +import ( + "time" + + "github.com/devtron-labs/common-lib/utils" + "github.com/devtron-labs/devtron/pkg/overview/constants" +) + +type BuildDeploymentActivityRequest struct { + From *time.Time `json:"from"` + To *time.Time `json:"to"` +} + +type ActivityKind string + +const ( + ActivityKindBuildTrigger ActivityKind = "buildTrigger" + ActivityKindDeploymentTrigger ActivityKind = "deploymentTrigger" + ActivityKindAvgBuildTime ActivityKind = "avgBuildTime" +) + +type BuildDeploymentActivityDetailedRequest struct { + ActivityKind ActivityKind `json:"activityKind" validate:"required,oneof=buildTrigger deploymentTrigger avgBuildTime"` + AggregationType constants.AggregationType `json:"aggregationType,omitempty"` + From *time.Time `json:"from"` + To *time.Time `json:"to"` +} + +type AppMetrics struct { + Total int `json:"total"` + YourApps *AppTypeMetrics `json:"yourApps"` + ThirdPartyApps *AppTypeMetrics `json:"thirdPartyApps"` +} + +type PipelineMetrics struct { + Total int `json:"total"` + Production int `json:"production"` + NonProduction int `json:"nonProduction"` +} + +// Common structure for entity metadata +type EntityMetadata struct { + Name string `json:"name"` + CreatedOn time.Time `json:"createdOn"` +} + +// Time-based aggregated data point +type TimeDataPoint struct { + Date string `json:"date"` // YYYY-MM-DD format for days, YYYY-MM-DD HH:00 format for hours + Count int `json:"count"` // Aggregated count for this time period +} + +// Enhanced metrics structures with detailed metadata +type ProjectMetrics struct { + Total int `json:"total"` + Details []EntityMetadata `json:"details"` +} + +type AppTypeMetrics struct { + Total int `json:"total"` + Details []EntityMetadata `json:"details"` +} + +type EnvironmentMetrics struct { + Total int `json:"total"` + Details []EntityMetadata `json:"details"` +} + +type BuildPipelineMetrics struct { + Total int `json:"total"` + NormalCiPipelines *CiPipelineTypeMetrics `json:"normalCiPipelines"` + ExternalCiPipelines *CiPipelineTypeMetrics `json:"externalCiPipelines"` +} + +type CiPipelineTypeMetrics struct { + Total int `json:"total"` + Details []EntityMetadata `json:"details"` +} + +type CdPipelineMetrics struct { + Total int `json:"total"` + Production *PipelineEnvironmentMetrics `json:"production"` + NonProduction *PipelineEnvironmentMetrics `json:"nonProduction"` +} + +type PipelineEnvironmentMetrics struct { + Total int `json:"total"` + Details []EntityMetadata `json:"details"` +} + +type DeploymentMetrics struct { + Total int `json:"total"` + Details []EntityMetadata `json:"details"` +} + +// Trend-based metrics structures for aggregated time-series data +type ProjectTrendMetrics struct { + Total int `json:"total"` + Trend []TimeDataPoint `json:"trend"` +} + +type AppTrendMetrics struct { + Total int `json:"total"` + YourApps *AppTypeTrendMetrics `json:"yourApps"` + ThirdPartyApps *AppTypeTrendMetrics `json:"thirdPartyApps"` +} + +type AppTypeTrendMetrics struct { + Total int `json:"total"` + Trend []TimeDataPoint `json:"trend"` +} + +type EnvironmentTrendMetrics struct { + Total int `json:"total"` + Trend []TimeDataPoint `json:"trend"` +} + +type BuildPipelineTrendMetrics struct { + Total int `json:"total"` + NormalCiPipelines *CiPipelineTypeTrendMetrics `json:"normalCiPipelines"` + ExternalCiPipelines *CiPipelineTypeTrendMetrics `json:"externalCiPipelines"` +} + +type CiPipelineTypeTrendMetrics struct { + Total int `json:"total"` + Trend []TimeDataPoint `json:"trend"` +} + +type CdPipelineTrendMetrics struct { + 
Total int `json:"total"` + Production *PipelineEnvironmentTrendMetrics `json:"production"` + NonProduction *PipelineEnvironmentTrendMetrics `json:"nonProduction"` +} + +type PipelineEnvironmentTrendMetrics struct { + Total int `json:"total"` + Trend []TimeDataPoint `json:"trend"` +} + +type TrendComparison struct { + Value int `json:"value"` // The difference value (can be positive or negative) + Label string `json:"label"` // e.g., "this month", "this week", "this quarter" +} + +type AppsOverviewResponse struct { + Projects *AtAGlanceMetric `json:"projects"` + YourApplications *AtAGlanceMetric `json:"yourApplications"` + HelmApplications *AtAGlanceMetric `json:"helmApplications"` + Environments *AtAGlanceMetric `json:"environments"` +} + +type WorkflowOverviewResponse struct { + BuildPipelines *AtAGlanceMetric `json:"buildPipelines"` + ExternalImageSource *AtAGlanceMetric `json:"externalImageSource"` + AllDeploymentPipelines *AtAGlanceMetric `json:"allDeploymentPipelines"` + ScanningEnabledInWorkflows *AtAGlanceMetric `json:"scanningEnabledInWorkflows"` + GitOpsComplianceProdPipelines *AtAGlanceMetric `json:"gitOpsComplianceProdPipelines"` + ProductionPipelines *AtAGlanceMetric `json:"productionPipelines"` +} + +type AtAGlanceMetric struct { + Total int `json:"total"` + Percentage float64 `json:"percentage,omitempty"` // Optional: percentage value for metrics that represent percentages +} + +type BuildDeploymentActivityResponse struct { + TotalBuildTriggers int `json:"totalBuildTriggers"` + AverageBuildTime float64 `json:"averageBuildTime"` // in minutes + TotalDeploymentTriggers int `json:"totalDeploymentTriggers"` +} + +type BuildDeploymentActivityDetailedResponse struct { + ActivityKind ActivityKind `json:"activityKind"` // Type of activity data returned + AggregationType constants.AggregationType `json:"aggregationType"` // HOURLY, DAILY, or MONTHLY + BuildTriggersTrend []BuildStatusDataPoint `json:"buildTriggersTrend,omitempty"` + DeploymentTriggersTrend []DeploymentStatusDataPoint `json:"deploymentTriggersTrend,omitempty"` + AvgBuildTimeTrend []BuildTimeDataPoint `json:"avgBuildTimeTrend,omitempty"` +} + +type BuildStatusDataPoint struct { + Timestamp time.Time `json:"timestamp"` // Timestamp representing start of aggregation period + Total int `json:"total"` // Total build triggers + Successful int `json:"successful"` // Successful builds + Failed int `json:"failed"` // Failed builds +} + +type DeploymentStatusDataPoint struct { + Timestamp time.Time `json:"timestamp"` // Timestamp representing start of aggregation period + Total int `json:"total"` // Total deployment triggers + Successful int `json:"successful"` // Successful deployments + Failed int `json:"failed"` // Failed deployments +} + +type BuildTimeDataPoint struct { + Timestamp time.Time `json:"timestamp"` // Timestamp representing start of aggregation period + AverageBuildTime float64 `json:"averageBuildTime"` // in minutes for that time period +} + +// Insights Beans +type PipelineType string + +const ( + BuildPipelines PipelineType = "buildPipelines" + DeploymentPipelines PipelineType = "deploymentPipelines" +) + +type SortOrder string + +const ( + ASC SortOrder = "ASC" + DESC SortOrder = "DESC" +) + +type InsightsRequest struct { + TimeRangeRequest *utils.TimeRangeRequest `json:"timeRangeRequest"` + PipelineType PipelineType `json:"pipelineType"` + SortOrder SortOrder `json:"sortOrder"` + Limit int `json:"limit"` + Offset int `json:"offset"` +} + +type InsightsResponse struct { + Pipelines []PipelineUsageItem 
`json:"pipelines"` + TotalCount int `json:"totalCount"` +} + +type PipelineUsageItem struct { + AppID int `json:"appId"` // Required for both CI and CD pipelines + EnvID int `json:"envId,omitempty"` // Only for deployment pipelines + PipelineID int `json:"pipelineId"` + PipelineName string `json:"pipelineName"` + AppName string `json:"appName"` + EnvName string `json:"envName,omitempty"` // Only for deployment pipelines + TriggerCount int `json:"triggerCount"` +} + +type ApprovalPolicyOverviewResponse struct { + TotalProdPipelineCount int `json:"totalProdPipelineCount"` + PipelineCountWithConfigApproval int `json:"pipelineCountWithConfigApproval"` + PipelineCountWithDeploymentApproval int `json:"pipelineCountWithDeploymentApproval"` +} + +// Cluster Management Overview Beans + +// ClusterOverviewRequest represents the request for cluster management overview +type ClusterOverviewRequest struct { + // No specific filters needed for now - returns all cluster data +} + +// ClusterOverviewResponse represents the comprehensive cluster management overview +type ClusterOverviewResponse struct { + TotalClusters int `json:"totalClusters"` + TotalCpuCapacity *ResourceCapacity `json:"totalCpuCapacity"` + TotalMemoryCapacity *ResourceCapacity `json:"totalMemoryCapacity"` + ClusterStatusBreakdown *ClusterStatusBreakdown `json:"clusterStatusBreakdown"` + NodeSchedulingBreakdown *NodeSchedulingBreakdown `json:"nodeSchedulingBreakdown"` + NodeErrorBreakdown *NodeErrorBreakdown `json:"nodeErrorBreakdown"` + ClusterDistribution *ClusterDistribution `json:"clusterDistribution"` + ClusterCapacityDistribution []ClusterCapacityDistribution `json:"clusterCapacityDistribution"` + NodeDistribution *NodeDistribution `json:"nodeDistribution"` +} + +// ResourceCapacity represents capacity with value and unit +type ResourceCapacity struct { + Value string `json:"value"` + Unit string `json:"unit"` +} + +// ClusterStatusBreakdown represents cluster health status breakdown +type ClusterStatusBreakdown struct { + Healthy int `json:"healthy"` + Unhealthy int `json:"unhealthy"` + ConnectionFailed int `json:"connectionFailed"` +} + +// NodeErrorBreakdown represents breakdown of node errors with detailed node information +type NodeErrorBreakdown struct { + ErrorCounts map[string]int `json:"errorCounts"` // Map of error types to their counts + Total int `json:"total"` // Total number of node errors + NodeErrors []NodeErrorDetail `json:"nodeErrors"` // Detailed list of nodes with errors +} + +// NodeErrorDetail represents detailed error information for a single node +type NodeErrorDetail struct { + NodeName string `json:"nodeName"` // Name of the node with errors + ClusterName string `json:"clusterName"` // Name of the cluster the node belongs to + ClusterID int `json:"clusterId"` // ID of the cluster + Errors []string `json:"errors"` // List of error types + NodeStatus string `json:"nodeStatus"` // Current status of the node (Ready/Not Ready) +} + +// NodeSchedulingBreakdown represents breakdown of node scheduling status with detailed node information +type NodeSchedulingBreakdown struct { + Schedulable int `json:"schedulable"` // Count of schedulable nodes + Unschedulable int `json:"unschedulable"` // Count of unschedulable nodes + Total int `json:"total"` // Total number of nodes + SchedulableNodes []NodeSchedulingDetail `json:"schedulableNodes"` // Detailed list of schedulable nodes + UnschedulableNodes []NodeSchedulingDetail `json:"unschedulableNodes"` // Detailed list of unschedulable nodes +} + +// NodeSchedulingDetail 
represents detailed information about a node's scheduling status +type NodeSchedulingDetail struct { + NodeName string `json:"nodeName"` // Name of the node + ClusterName string `json:"clusterName"` // Name of the cluster the node belongs to + ClusterID int `json:"clusterId"` // ID of the cluster + Schedulable bool `json:"schedulable"` // Whether the node is schedulable +} + +// ClusterDistribution represents cluster distribution by provider and cluster version +type ClusterDistribution struct { + ByProvider []ProviderDistribution `json:"byProvider"` + ByVersion []VersionDistribution `json:"byVersion"` +} + +// ProviderDistribution represents cluster count by cloud provider +type ProviderDistribution struct { + Provider string `json:"provider"` // AWS, GCP, Azure, On-Premise, etc. + Count int `json:"count"` +} + +// VersionDistribution represents cluster count by Kubernetes version (major.minor only) +type VersionDistribution struct { + Version string `json:"version"` // e.g., "1.28", "1.29", "1.30" (major.minor only, patch ignored) + Count int `json:"count"` +} + +// ClusterCapacityDistribution represents capacity distribution for individual clusters +type ClusterCapacityDistribution struct { + ClusterID int `json:"clusterId"` + ClusterName string `json:"clusterName"` + ServerVersion string `json:"serverVersion"` // Kubernetes server version (e.g., "v1.28.3") + CPU *ClusterResourceMetric `json:"cpu"` + Memory *ClusterResourceMetric `json:"memory"` +} + +// ClusterResourceMetric represents resource metrics for a cluster +type ClusterResourceMetric struct { + Capacity float64 `json:"capacity"` // Capacity in cores for CPU, Gi for memory (with decimal precision) + UtilizationPercent float64 `json:"utilizationPercent"` // Utilization percentage + RequestsPercent float64 `json:"requestsPercent"` // Requests percentage + LimitsPercent float64 `json:"limitsPercent"` // Limits percentage +} + +// NodeDistribution represents node distribution by clusters and autoscaler +type NodeDistribution struct { + ByClusters []ClusterNodeCount `json:"byClusters"` // Node count grouped by cluster + ByAutoscaler []AutoscalerNodeCount `json:"byAutoscaler"` // Node count grouped by autoscaler type +} + +// Removed old structs - ClusterSummary, ResourceSummary, NodeCountSummary not needed in new API spec + +// ClusterNodeCount represents node count for a specific cluster +type ClusterNodeCount struct { + ClusterID int `json:"clusterId"` // ID of the cluster + ClusterName string `json:"clusterName"` // Name of the cluster + NodeCount int `json:"nodeCount"` // Total number of nodes in this cluster +} + +// AutoscalerNodeCount represents node count for a specific autoscaler type with detailed node information +type AutoscalerNodeCount struct { + AutoscalerType string `json:"autoscalerType"` // Type of autoscaler (EKS, Karpenter, Cast AI, GKE, CAS) + NodeCount int `json:"nodeCount"` // Total number of nodes managed by this autoscaler + NodeDetails []AutoscalerNodeDetail `json:"nodeDetails"` // Detailed list of nodes managed by this autoscaler +} + +// AutoscalerNodeDetail represents detailed information for a single node managed by autoscaler +type AutoscalerNodeDetail struct { + NodeName string `json:"nodeName"` // Name of the node + ClusterName string `json:"clusterName"` // Name of the cluster the node belongs to + ClusterID int `json:"clusterId"` // ID of the cluster + ManagedBy string `json:"managedBy"` // Display name of the autoscaler managing this node +} + +// Cluster Upgrade Overview Beans + +// 
ClusterUpgradeOverviewResponse represents the response for cluster upgrade overview +type ClusterUpgradeOverviewResponse struct { + CanCurrentUserUpgrade bool `json:"canCurrentUserUpgrade"` + LatestVersion string `json:"latestVersion"` + ClusterList []ClusterUpgradeDetails `json:"clusterList"` +} + +// ClusterUpgradeDetails represents upgrade details for a single cluster +type ClusterUpgradeDetails struct { + ClusterId int `json:"clusterId"` + ClusterName string `json:"clusterName"` + CurrentVersion string `json:"currentVersion"` + UpgradePath []string `json:"upgradePath"` +} + +// NodeViewGroupType represents the type of node view grouping +type NodeViewGroupType string + +const ( + NodeViewGroupTypeNodeErrors NodeViewGroupType = "nodeErrors" + NodeViewGroupTypeNodeScheduling NodeViewGroupType = "nodeScheduling" + NodeViewGroupTypeAutoscaler NodeViewGroupType = "autoscalerManaged" +) + +// ClusterOverviewDetailRequest represents request parameters for detailed drill-down API +type ClusterOverviewDetailRequest struct { + GroupBy NodeViewGroupType `schema:"groupBy" validate:"required,oneof=nodeErrors nodeScheduling autoscalerManaged"` + Offset int `schema:"offset"` + Limit int `schema:"limit"` + SortBy string `schema:"sortBy"` + SortOrder string `schema:"sortOrder"` // asc or desc + SearchKey string `schema:"searchKey"` + + // Filter parameters (optional, used based on GroupBy) + AutoscalerType string `schema:"autoscalerType"` // Filter by autoscaler type (only for autoscalerManaged groupBy) + ErrorType string `schema:"errorType"` // Filter by error type (only for nodeErrors groupBy) + SchedulableType string `schema:"schedulableType"` // Filter by schedulable type: "schedulable" or "unschedulable" (only for nodeScheduling groupBy) +} + +// ClusterOverviewNodeDetailedResponse represents the unified response for all node view group types +// Fields are conditionally included based on the groupBy parameter +type ClusterOverviewNodeDetailedResponse struct { + TotalCount int `json:"totalCount"` + NodeList []ClusterOverviewNodeDetailedItem `json:"nodeList"` +} + +// ClusterOverviewNodeDetailedItem represents a single node item in the detailed response +// Different fields are populated based on the NodeViewGroupType +type ClusterOverviewNodeDetailedItem struct { + // Common fields (always present) + NodeName string `json:"nodeName"` + ClusterName string `json:"clusterName"` + ClusterID int `json:"clusterId,omitempty"` + + // NodeErrors specific fields + NodeErrors []string `json:"nodeErrors,omitempty"` // List of error types (only for nodeErrors type) + NodeStatus string `json:"nodeStatus,omitempty"` // Node status: Ready/Not Ready (only for nodeErrors type) + + // NodeScheduling specific fields + Schedulable bool `json:"schedulable,omitempty"` // Whether node is schedulable (only for nodeScheduling type) + + // Autoscaler specific fields + AutoscalerType string `json:"autoscalerType,omitempty"` // Type of autoscaler managing the node (only for autoscaler type) +} diff --git a/pkg/overview/bean/SecurityOverviewBean.go b/pkg/overview/bean/SecurityOverviewBean.go new file mode 100644 index 0000000000..2c393f1d95 --- /dev/null +++ b/pkg/overview/bean/SecurityOverviewBean.go @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package bean + +import "time" + +// ============================================================================ +// Common Types +// ============================================================================ + +type EnvType string + +const ( + EnvTypeProd EnvType = "prod" + EnvTypeNonProd EnvType = "non-prod" + EnvTypeAll EnvType = "all" +) + +type VulnerabilityCount struct { + Count int `json:"count"` // Total instances (with duplicates) + UniqueCount int `json:"uniqueCount"` // Unique CVEs +} + +type SeverityCount struct { + Critical int `json:"critical"` + High int `json:"high"` + Medium int `json:"medium"` + Low int `json:"low"` + Unknown int `json:"unknown"` +} + +type AgeBucketSeverity struct { + Critical int `json:"critical"` + High int `json:"high"` + Medium int `json:"medium"` + Low int `json:"low"` + Unknown int `json:"unknown"` +} + +type AgeDistribution struct { + LessThan30Days *AgeBucketSeverity `json:"lessThan30Days"` + Between30To60Days *AgeBucketSeverity `json:"between30To60Days"` + Between60To90Days *AgeBucketSeverity `json:"between60To90Days"` + MoreThan90Days *AgeBucketSeverity `json:"moreThan90Days"` +} + +// ============================================================================ +// 1. Security Overview API (At a Glance - Organization-wide) +// ============================================================================ + +type SecurityOverviewRequest struct { + EnvIds []int `json:"envIds" schema:"envIds"` + ClusterIds []int `json:"clusterIds" schema:"clusterIds"` + AppIds []int `json:"appIds" schema:"appIds"` +} + +type SecurityOverviewResponse struct { + TotalVulnerabilities *VulnerabilityCount `json:"totalVulnerabilities"` + FixableVulnerabilities *VulnerabilityCount `json:"fixableVulnerabilities"` + ZeroDayVulnerabilities *VulnerabilityCount `json:"zeroDayVulnerabilities"` +} + +// ============================================================================ +// 2. Severity Insights API (With Prod/Non-Prod Filtering) +// ============================================================================ + +type SeverityInsightsRequest struct { + EnvIds []int `json:"envIds" schema:"envIds"` + ClusterIds []int `json:"clusterIds" schema:"clusterIds"` + AppIds []int `json:"appIds" schema:"appIds"` + EnvType EnvType `json:"envType" schema:"envType" validate:"required,oneof=prod non-prod all"` +} + +type SeverityInsightsResponse struct { + SeverityDistribution *SeverityCount `json:"severityDistribution"` + AgeDistribution *AgeDistribution `json:"ageDistribution"` +} + +// ============================================================================ +// 3. 
Deployment Security Status API +// ============================================================================ + +type DeploymentSecurityStatusRequest struct { + EnvIds []int `json:"envIds" schema:"envIds"` + ClusterIds []int `json:"clusterIds" schema:"clusterIds"` + AppIds []int `json:"appIds" schema:"appIds"` +} + +type DeploymentMetric struct { + Count int `json:"count"` + Percentage float64 `json:"percentage"` +} + +type WorkflowMetric struct { + Count int `json:"count"` + Percentage float64 `json:"percentage"` +} + +type DeploymentSecurityStatusResponse struct { + ActiveDeploymentsWithVulnerabilities *DeploymentMetric `json:"activeDeploymentsWithVulnerabilities"` + ActiveDeploymentsWithUnscannedImages *DeploymentMetric `json:"activeDeploymentsWithUnscannedImages"` + WorkflowsWithScanningEnabled *WorkflowMetric `json:"workflowsWithScanningEnabled"` +} + +// ============================================================================ +// 4. Vulnerability Details API (Paginated List) +// ============================================================================ + +type VulnerabilitiesRequest struct { + EnvIds []int `json:"envIds" schema:"envIds"` + ClusterIds []int `json:"clusterIds" schema:"clusterIds"` + AppIds []int `json:"appIds" schema:"appIds"` + Severity string `json:"severity" schema:"severity"` // Optional: critical, high, medium, low, unknown + Offset int `json:"offset" schema:"offset"` + Size int `json:"size" schema:"size" validate:"required,min=1,max=100"` +} + +type Vulnerability struct { + CveName string `json:"cveName"` + Severity string `json:"severity"` + Package string `json:"package"` + CurrentVersion string `json:"currentVersion"` + FixedVersion string `json:"fixedVersion"` + AppCount int `json:"appCount"` // Number of apps affected + EnvironmentCount int `json:"environmentCount"` // Number of environments affected + FirstDetected time.Time `json:"firstDetected"` +} + +type VulnerabilitiesResponse struct { + Vulnerabilities []*Vulnerability `json:"vulnerabilities"` + Total int `json:"total"` + Offset int `json:"offset"` + Size int `json:"size"` +} + +// ============================================================================ +// 5. Vulnerability Trend API (Time-series with Prod/Non-Prod Filtering) +// ============================================================================ + +type VulnerabilityTrendRequest struct { + TimeWindow string `json:"timeWindow" schema:"timeWindow" validate:"required,oneof=today thisWeek thisMonth thisQuarter"` + EnvType EnvType `json:"envType" schema:"envType" validate:"required,oneof=prod non-prod all"` + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` +} + +type VulnerabilityTrendDataPoint struct { + Timestamp time.Time `json:"timestamp"` + Critical int `json:"critical"` + High int `json:"high"` + Medium int `json:"medium"` + Low int `json:"low"` + Unknown int `json:"unknown"` + Total int `json:"total"` +} + +type VulnerabilityTrendResponse struct { + Trend []*VulnerabilityTrendDataPoint `json:"trend"` +} + +// ============================================================================ +// 6. 
Blocked Deployments Trend API (Organization-wide) +// ============================================================================ + +type BlockedDeploymentsTrendRequest struct { + TimeWindow string `json:"timeWindow" schema:"timeWindow" validate:"required,oneof=today thisWeek thisMonth thisQuarter"` + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` +} + +type BlockedDeploymentDataPoint struct { + Timestamp time.Time `json:"timestamp"` + Count int `json:"count"` +} + +type BlockedDeploymentsTrendResponse struct { + Trend []*BlockedDeploymentDataPoint `json:"trend"` +} diff --git a/pkg/overview/cache/ClusterCacheService.go b/pkg/overview/cache/ClusterCacheService.go new file mode 100644 index 0000000000..2f56610a91 --- /dev/null +++ b/pkg/overview/cache/ClusterCacheService.go @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package cache + +import ( + "fmt" + "sync" + "time" + + "github.com/devtron-labs/devtron/pkg/overview/bean" + "go.uber.org/zap" +) + +// ClusterCacheService provides caching functionality for cluster overview data +type ClusterCacheService interface { + GetClusterOverview() (*bean.ClusterOverviewResponse, bool) + SetClusterOverview(data *bean.ClusterOverviewResponse) error + InvalidateClusterOverview() + InvalidateAll() + IsRefreshing() bool + SetRefreshing(refreshing bool) + GetCacheAge() time.Duration +} + +// ClusterCacheServiceImpl implements ClusterCacheService using in-memory cache +type ClusterCacheServiceImpl struct { + logger *zap.SugaredLogger + overviewCache *cacheEntry + cacheMutex sync.RWMutex +} + +// cacheEntry represents a cached item with timestamp +type cacheEntry struct { + data interface{} + lastUpdated time.Time + isRefreshing bool +} + +// NewClusterCacheServiceImpl creates a new instance of ClusterCacheServiceImpl +func NewClusterCacheServiceImpl(logger *zap.SugaredLogger) *ClusterCacheServiceImpl { + return &ClusterCacheServiceImpl{ + logger: logger, + } +} + +// GetClusterOverview retrieves cluster overview data from cache +func (impl *ClusterCacheServiceImpl) GetClusterOverview() (*bean.ClusterOverviewResponse, bool) { + impl.cacheMutex.RLock() + defer impl.cacheMutex.RUnlock() + + if impl.overviewCache == nil { + return nil, false + } + + if data, ok := impl.overviewCache.data.(*bean.ClusterOverviewResponse); ok { + age := time.Since(impl.overviewCache.lastUpdated) + impl.logger.Infow("cluster overview cache hit", "cacheAge", age) + return data, true + } + + impl.logger.Errorw("cluster overview cache data type mismatch") + return nil, false +} + +// SetClusterOverview stores cluster overview data in cache with timestamp +func (impl *ClusterCacheServiceImpl) SetClusterOverview(data *bean.ClusterOverviewResponse) error { + if data == nil { + return fmt.Errorf("cannot cache nil cluster overview data") + } + + impl.cacheMutex.Lock() + defer impl.cacheMutex.Unlock() + + impl.overviewCache = &cacheEntry{ + data: data, + lastUpdated: time.Now(), + } + + impl.logger.Debugw("cluster overview data cached", "timestamp", impl.overviewCache.lastUpdated) + return nil +} + +// InvalidateClusterOverview removes cluster overview data from cache +func (impl *ClusterCacheServiceImpl) InvalidateClusterOverview() { + impl.cacheMutex.Lock() + defer impl.cacheMutex.Unlock() + + impl.overviewCache = nil + impl.logger.Debugw("cluster overview cache invalidated") +} + +// InvalidateAll removes all cached data +func (impl *ClusterCacheServiceImpl) InvalidateAll() { + impl.cacheMutex.Lock() + defer 
impl.cacheMutex.Unlock()
+
+	impl.overviewCache = nil
+	impl.logger.Debugw("all cluster cache invalidated")
+}
+
+// IsRefreshing checks if cache is currently being refreshed
+func (impl *ClusterCacheServiceImpl) IsRefreshing() bool {
+	impl.cacheMutex.RLock()
+	defer impl.cacheMutex.RUnlock()
+
+	if impl.overviewCache == nil {
+		return false
+	}
+	return impl.overviewCache.isRefreshing
+}
+
+// SetRefreshing marks cache as being refreshed
+func (impl *ClusterCacheServiceImpl) SetRefreshing(refreshing bool) {
+	impl.cacheMutex.Lock()
+	defer impl.cacheMutex.Unlock()
+
+	if impl.overviewCache != nil {
+		impl.overviewCache.isRefreshing = refreshing
+	} else if refreshing {
+		// Initialize cache entry if setting refreshing to true
+		impl.overviewCache = &cacheEntry{
+			isRefreshing: true,
+			lastUpdated:  time.Now(),
+		}
+	}
+}
+
+// GetCacheAge returns how old the cached data is
+func (impl *ClusterCacheServiceImpl) GetCacheAge() time.Duration {
+	impl.cacheMutex.RLock()
+	defer impl.cacheMutex.RUnlock()
+
+	if impl.overviewCache == nil {
+		return 0
+	}
+	return time.Since(impl.overviewCache.lastUpdated)
+}
diff --git a/pkg/overview/config/ClusterOverviewConfig.go b/pkg/overview/config/ClusterOverviewConfig.go
new file mode 100644
index 0000000000..349f8da247
--- /dev/null
+++ b/pkg/overview/config/ClusterOverviewConfig.go
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2024. Devtron Inc.
+ */
+
+package config
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/caarlos0/env"
+)
+
+// ClusterOverviewConfig represents configuration for cluster overview functionality
+type ClusterOverviewConfig struct {
+	// CacheEnabled enables or disables caching for cluster overview data
+	CacheEnabled bool `env:"CLUSTER_OVERVIEW_CACHE_ENABLED" envDefault:"true" description:"Enable caching for cluster overview data"`
+
+	// BackgroundRefreshEnabled enables proactive background cache refresh
+	BackgroundRefreshEnabled bool `env:"CLUSTER_OVERVIEW_BACKGROUND_REFRESH_ENABLED" envDefault:"true" description:"Enable background refresh of cluster overview cache"`
+
+	// RefreshIntervalSeconds defines how often to refresh cache in background
+	RefreshIntervalSeconds int `env:"CLUSTER_OVERVIEW_REFRESH_INTERVAL_SECONDS" envDefault:"15" description:"Background cache refresh interval in seconds"`
+
+	// MaxParallelClusters limits concurrent cluster API calls during refresh
+	MaxParallelClusters int `env:"CLUSTER_OVERVIEW_MAX_PARALLEL_CLUSTERS" envDefault:"15" description:"Maximum number of clusters to fetch in parallel during refresh"`
+
+	// MaxStaleDataSeconds maximum age of cache before considering it too stale
+	MaxStaleDataSeconds int `env:"CLUSTER_OVERVIEW_MAX_STALE_DATA_SECONDS" envDefault:"30" description:"Maximum age of cached data in seconds before warning"`
+}
+
+// GetRefreshInterval returns the refresh interval as a time.Duration
+func (c *ClusterOverviewConfig) GetRefreshInterval() time.Duration {
+	return time.Duration(c.RefreshIntervalSeconds) * time.Second
+}
+
+// GetMaxStaleDataDuration returns the max stale data duration as a time.Duration
+func (c *ClusterOverviewConfig) GetMaxStaleDataDuration() time.Duration {
+	return time.Duration(c.MaxStaleDataSeconds) * time.Second
+}
+
+func GetClusterOverviewConfig() (*ClusterOverviewConfig, error) {
+	cfg := &ClusterOverviewConfig{}
+	err := env.Parse(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse cluster overview config: %w", err)
+	}
+
+	return cfg, nil
+}
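A short, illustrative sketch of how this config could drive the background refresh loop; refreshClusterOverviewCache is a hypothetical hook, not part of this patch:

// Illustrative wiring only (assumes this package is imported as "config").
cfg, err := config.GetClusterOverviewConfig()
if err != nil {
	panic(err) // real code would propagate the error
}
if cfg.BackgroundRefreshEnabled {
	go func() {
		ticker := time.NewTicker(cfg.GetRefreshInterval()) // 15s by default
		defer ticker.Stop()
		for range ticker.C {
			refreshClusterOverviewCache() // hypothetical refresh hook
		}
	}()
}

diff --git a/pkg/overview/constants/ClusterConstants.go b/pkg/overview/constants/ClusterConstants.go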
new file mode 100644 index 0000000000..ebcbff7842 --- /dev/null +++ b/pkg/overview/constants/ClusterConstants.go @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package constants + +// Cloud Provider Constants +const ( + ProviderAWS = "AWS" + ProviderGCP = "GCP" + ProviderAzure = "Azure" + ProviderOracle = "Oracle" + ProviderDigitalOcean = "DigitalOcean" + ProviderIBM = "IBM" + ProviderAlibaba = "Alibaba" + ProviderUnknown = "Unknown" +) + +// Node Condition Type Constants +// These map to Kubernetes node condition types +const ( + NodeConditionNetworkUnavailable = "NetworkUnavailable" + NodeConditionMemoryPressure = "MemoryPressure" + NodeConditionDiskPressure = "DiskPressure" + NodeConditionPIDPressure = "PIDPressure" + NodeConditionReady = "Ready" + NodeConditionOthers = "Others" +) + +// Node Error Breakdown Keys +// These are used as keys in the NodeErrorBreakdown map +const ( + NodeErrorNetworkUnavailable = "NetworkUnavailable" + NodeErrorMemoryPressure = "MemoryPressure" + NodeErrorDiskPressure = "DiskPressure" + NodeErrorPIDPressure = "PIDPressure" + NodeErrorKubeletNotReady = "KubeletNotReady" + NodeErrorOthers = "Others" +) + +// Version Constants +const ( + VersionUnknown = "Unknown" +) + +// Autoscaler Type Constants +const ( + AutoscalerKarpenter = "karpenter" + AutoscalerGKE = "gke" + AutoscalerEKS = "eks" + AutoscalerAKS = "aks" + AutoscalerCastAI = "castAi" + AutoscalerClusterAutoscaler = "clusterAutoscaler" + AutoscalerNotDetected = "notDetected" +) + +// Autoscaler Label Constants +// These labels are used to identify which autoscaler manages a node +const ( + // EKS Auto Mode label + LabelEKSComputeType = "eks.amazonaws.com/compute-type" + LabelEKSComputeAuto = "auto" + + // Karpenter label + LabelKarpenterInitialized = "karpenter.sh/initialized" + LabelKarpenterTrue = "true" + + // Cast AI label + LabelCastAIManagedBy = "provisioner.cast.ai/managed-by" + LabelCastAIValue = "cast.ai" + + // GKE label + LabelGKEProvisioning = "cloud.google.com/gke-provisioning" + LabelGKEAutoPilot = "spot" +) + +// Node Name Prefix Constants +const ( + NodePrefixGKE = "gke-" + NodePrefixAKS = "aks-" + NodePrefixEKS = "eks-" + NodePrefixOKE = "oke-" +) + +// Node Name Pattern Constants +const ( + NodePatternAWSComputeInternal = ".compute.internal" + NodePatternAWSEC2Internal = ".ec2.internal" + NodePatternAzureVMSS = "vmss" + NodePatternAzureScaleSets = "scalesets" + NodePatternGCP = "gcp" + NodePatternGoogle = "google" + NodePatternDigitalOcean = "digitalocean" + NodePatternIBMKube = "kube" + NodePatternAliyun = "aliyun" + NodePatternAlibabaRegion = "cn-" +) + +// AWS Region Pattern Constants +var AWSRegionPatterns = []string{ + "us-east-", "us-west-", "eu-west-", "eu-central-", "ap-south-", + "ap-southeast-", "ap-northeast-", "ca-central-", "sa-east-", +} + +// Sort Field Constants for Cluster Overview Detail API +const ( + SortFieldNodeName = "nodeName" + SortFieldClusterName = "clusterName" + SortFieldNodeErrors 
= "nodeErrors"
+	SortFieldNodeStatus     = "nodeStatus"
+	SortFieldSchedulable    = "schedulable"
+	SortFieldAutoscalerType = "autoscalerType"
+)
+
+// Sort Order Constants
+const (
+	SortOrderAsc  = "ASC"
+	SortOrderDesc = "DESC"
+)
+
+// Schedulable Type Constants for filtering
+const (
+	SchedulableTypeSchedulable   = "schedulable"
+	SchedulableTypeUnschedulable = "unschedulable"
+)
diff --git a/pkg/overview/constants/TimeConstants.go b/pkg/overview/constants/TimeConstants.go
new file mode 100644
index 0000000000..b1be68396f
--- /dev/null
+++ b/pkg/overview/constants/TimeConstants.go
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2024. Devtron Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package constants
+
+import (
+	"time"
+)
+
+// TimePeriod represents the predefined time periods
+type TimePeriod string
+
+// TimeWindow represents the predefined time windows (same as TimePeriod but for API consistency)
+type TimeWindow string
+
+const (
+	Today       TimePeriod = "today"
+	ThisWeek    TimePeriod = "week"
+	ThisMonth   TimePeriod = "month"
+	ThisQuarter TimePeriod = "quarter"
+	LastWeek    TimePeriod = "lastWeek"
+	LastMonth   TimePeriod = "lastMonth"
+)
+
+// AggregationType represents how data should be aggregated
+type AggregationType string
+
+const (
+	AggregateByHour  AggregationType = "HOUR"
+	AggregateByDay   AggregationType = "DAY"
+	AggregateByMonth AggregationType = "MONTH"
+)
+
+// TimeRange represents a time range with from and to timestamps
+type TimeRange struct {
+	From            time.Time
+	To              time.Time
+	AggregationType AggregationType
+}
+
+// IsValidTimePeriod checks if the given string is a valid time period
+func IsValidTimePeriod(period string) bool {
+	switch TimePeriod(period) {
+	case Today, ThisWeek, ThisMonth, ThisQuarter, LastWeek, LastMonth:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsValidTimeWindow checks if the given string is a valid time window
+func IsValidTimeWindow(window string) bool {
+	switch window {
+	case "today", "week", "month", "quarter", "lastWeek", "lastMonth":
+		return true
+	default:
+		return false
+	}
+}
+
+// GetAggregationType returns the aggregation type for a given time period
+// This is used to determine whether to aggregate data by hour, day, or month
+func GetAggregationType(period TimePeriod) AggregationType {
+	switch period {
+	case Today:
+		return AggregateByHour
+	case ThisWeek, ThisMonth, LastWeek, LastMonth:
+		return AggregateByDay
+	case ThisQuarter:
+		return AggregateByMonth
+	default:
+		return AggregateByDay
+	}
+}
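A small, illustrative sketch of how a caller might combine the validator and the aggregation mapping; bucketFor is a hypothetical helper and assumes fmt plus this constants package are imported:

// Hypothetical helper (illustrative only).
func bucketFor(period string) (constants.AggregationType, error) {
	if !constants.IsValidTimePeriod(period) {
		return "", fmt.Errorf("unsupported period: %s", period)
	}
	// e.g. "quarter" -> AggregateByMonth, "today" -> AggregateByHour
	return constants.GetAggregationType(constants.TimePeriod(period)), nil
}

diff --git a/pkg/overview/util/AutoscalerUtil.go b/pkg/overview/util/AutoscalerUtil.go
new file mode 100644
index 0000000000..a551d7c054
--- /dev/null
+++ b/pkg/overview/util/AutoscalerUtil.go
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2024. Devtron Inc.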
+ */
+
+package util
+
+import (
+	capacityBean "github.com/devtron-labs/devtron/pkg/k8s/capacity/bean"
+	"github.com/devtron-labs/devtron/pkg/overview/constants"
+)
+
+// DetermineAutoscalerTypeFromLabels determines the autoscaler type based on node labels (map format).
+// It is used by the informer layer, which works with native Kubernetes node labels.
+// Priority order: EKS Auto Mode > Karpenter > Cast AI > GKE > Not Detected.
+// Note: Cluster Autoscaler (CAS) cannot be reliably detected from node labels alone, since it works
+// with existing node groups and doesn't add its own labels. Nodes managed by CAS will show as "Not Detected".
+func DetermineAutoscalerTypeFromLabels(labels map[string]string) string {
+	// Check for EKS Auto Mode: eks.amazonaws.com/compute-type=auto
+	if computeType, exists := labels[constants.LabelEKSComputeType]; exists && computeType == constants.LabelEKSComputeAuto {
+		return constants.AutoscalerEKS
+	}
+
+	// Check for Karpenter: karpenter.sh/initialized=true
+	if initialized, exists := labels[constants.LabelKarpenterInitialized]; exists && initialized == constants.LabelKarpenterTrue {
+		return constants.AutoscalerKarpenter
+	}
+
+	// Check for Cast AI: provisioner.cast.ai/managed-by=cast.ai
+	if managedBy, exists := labels[constants.LabelCastAIManagedBy]; exists && managedBy == constants.LabelCastAIValue {
+		return constants.AutoscalerCastAI
+	}
+
+	// Check for GKE: cloud.google.com/gke-provisioning matching LabelGKEAutoPilot ("spot")
+	if provisioning, exists := labels[constants.LabelGKEProvisioning]; exists && provisioning == constants.LabelGKEAutoPilot {
+		return constants.AutoscalerGKE
+	}
+
+	// If none of the known autoscaler labels are found, return Not Detected.
+	// This includes nodes managed by Cluster Autoscaler (CAS), as CAS doesn't add unique labels.
+	return constants.AutoscalerNotDetected
+}
+
+// DetermineAutoscalerTypeFromLabelArray determines the autoscaler type based on node labels (array format).
+// It is used by the service layer, which works with capacity service label objects.
+// It converts the label array to a map and delegates to DetermineAutoscalerTypeFromLabels.
+func DetermineAutoscalerTypeFromLabelArray(labels []*capacityBean.LabelAnnotationTaintObject) string {
+	// Convert label array to map for easier lookup
+	labelMap := make(map[string]string)
+	for _, label := range labels {
+		if label != nil {
+			labelMap[label.Key] = label.Value
+		}
+	}
+
+	// Delegate to the main detection function
+	return DetermineAutoscalerTypeFromLabels(labelMap)
+}
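For illustration, a node carrying the Karpenter marker label resolves as follows (label keys and values taken from the constants above; the hostname is made up):

labels := map[string]string{
	"karpenter.sh/initialized": "true", // constants.LabelKarpenterInitialized
	"kubernetes.io/hostname":   "ip-10-0-1-23.ec2.internal",
}
fmt.Println(util.DetermineAutoscalerTypeFromLabels(labels)) // prints "karpenter"

diff --git a/pkg/overview/util/ClusterCapacityConverter.go b/pkg/overview/util/ClusterCapacityConverter.go
new file mode 100644
index 0000000000..40203f238d
--- /dev/null
+++ b/pkg/overview/util/ClusterCapacityConverter.go
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2024. Devtron Inc.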
+ */ + +package util + +import ( + "fmt" + + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" + capacityBean "github.com/devtron-labs/devtron/pkg/k8s/capacity/bean" + overviewBean "github.com/devtron-labs/devtron/pkg/overview/bean" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +// ConvertClusterOverviewToCapacityDetails converts ClusterOverviewResponse to ClusterCapacityDetail list +// This is used to serve resource browser API from cluster overview cache +func ConvertClusterOverviewToCapacityDetails( + logger *zap.SugaredLogger, + overviewResponse *overviewBean.ClusterOverviewResponse, + allClusters []clusterBean.ClusterBean, +) []*capacityBean.ClusterCapacityDetail { + if overviewResponse == nil { + logger.Warn("overview response is nil, cannot convert to capacity details") + return nil + } + + capacityDetails := make([]*capacityBean.ClusterCapacityDetail, 0, len(overviewResponse.ClusterCapacityDistribution)) + + // Create a map for quick lookup of cluster beans + clusterMap := make(map[int]clusterBean.ClusterBean) + for _, cluster := range allClusters { + clusterMap[cluster.Id] = cluster + } + + // Create a map for quick lookup of capacity distribution + capacityDistMap := make(map[int]overviewBean.ClusterCapacityDistribution) + for _, capacityDist := range overviewResponse.ClusterCapacityDistribution { + capacityDistMap[capacityDist.ClusterID] = capacityDist + } + + // Create a map for node errors by cluster + nodeErrorsByCluster := make(map[int]map[corev1.NodeConditionType][]string) + for _, nodeError := range overviewResponse.NodeErrorBreakdown.NodeErrors { + if _, exists := nodeErrorsByCluster[nodeError.ClusterID]; !exists { + nodeErrorsByCluster[nodeError.ClusterID] = make(map[corev1.NodeConditionType][]string) + } + // Convert error strings to NodeConditionType + for _, errorStr := range nodeError.Errors { + conditionType := corev1.NodeConditionType(errorStr) + nodeErrorsByCluster[nodeError.ClusterID][conditionType] = append( + nodeErrorsByCluster[nodeError.ClusterID][conditionType], + nodeError.NodeName, + ) + } + } + + // Create a map for node count by cluster + nodeCountByCluster := make(map[int]int) + for _, nodeCount := range overviewResponse.NodeDistribution.ByClusters { + nodeCountByCluster[nodeCount.ClusterID] = nodeCount.NodeCount + } + + // Build capacity details for each cluster + for _, cluster := range allClusters { + capacityDist, hasCapacity := capacityDistMap[cluster.Id] + + var detail *capacityBean.ClusterCapacityDetail + if hasCapacity { + // Cluster has capacity data (connected cluster) + detail = buildCapacityDetailFromOverview( + cluster, + capacityDist, + nodeErrorsByCluster[cluster.Id], + nodeCountByCluster[cluster.Id], + ) + } else { + // Connection failed cluster + detail = &capacityBean.ClusterCapacityDetail{ + Id: cluster.Id, + Name: cluster.ClusterName, + ErrorInConnection: cluster.ErrorInConnecting, + Status: capacityBean.ClusterStatusConnectionFailed, + IsVirtualCluster: cluster.IsVirtualCluster, + IsProd: cluster.IsProd, + } + } + + capacityDetails = append(capacityDetails, detail) + } + + logger.Debugw("converted cluster overview to capacity details", + "totalClusters", len(capacityDetails), + "connectedClusters", len(overviewResponse.ClusterCapacityDistribution)) + + return capacityDetails +} + +// buildCapacityDetailFromOverview builds a single ClusterCapacityDetail from overview data +func buildCapacityDetailFromOverview( + cluster clusterBean.ClusterBean, + capacityDist overviewBean.ClusterCapacityDistribution, + 
nodeErrors map[corev1.NodeConditionType][]string, + nodeCount int, +) *capacityBean.ClusterCapacityDetail { + // Determine cluster status based on node errors + status := capacityBean.ClusterStatusHealthy + if len(nodeErrors) > 0 { + status = capacityBean.ClusterStatusUnHealthy + } + + // Build CPU and Memory resource objects + cpuResource := &capacityBean.ResourceDetailObject{ + Capacity: fmt.Sprintf("%.2f", capacityDist.CPU.Capacity), + } + + // Add utilization, requests, and limits percentages if available + if capacityDist.CPU.UtilizationPercent > 0 { + cpuResource.UsagePercentage = fmt.Sprintf("%.2f", capacityDist.CPU.UtilizationPercent) + } + if capacityDist.CPU.RequestsPercent > 0 { + cpuResource.RequestPercentage = fmt.Sprintf("%.2f", capacityDist.CPU.RequestsPercent) + } + if capacityDist.CPU.LimitsPercent > 0 { + cpuResource.LimitPercentage = fmt.Sprintf("%.2f", capacityDist.CPU.LimitsPercent) + } + + memoryResource := &capacityBean.ResourceDetailObject{ + Capacity: fmt.Sprintf("%.2fGi", capacityDist.Memory.Capacity), + } + + // Add utilization, requests, and limits percentages if available + if capacityDist.Memory.UtilizationPercent > 0 { + memoryResource.UsagePercentage = fmt.Sprintf("%.2f", capacityDist.Memory.UtilizationPercent) + } + if capacityDist.Memory.RequestsPercent > 0 { + memoryResource.RequestPercentage = fmt.Sprintf("%.2f", capacityDist.Memory.RequestsPercent) + } + if capacityDist.Memory.LimitsPercent > 0 { + memoryResource.LimitPercentage = fmt.Sprintf("%.2f", capacityDist.Memory.LimitsPercent) + } + + return &capacityBean.ClusterCapacityDetail{ + Id: cluster.Id, + Name: cluster.ClusterName, + NodeCount: nodeCount, + NodeDetails: []capacityBean.NodeDetails{}, // Not available in overview cache + NodeErrors: nodeErrors, + NodeK8sVersions: []string{}, // Not available in overview cache + ServerVersion: "", // Not available in overview cache + Cpu: cpuResource, + Memory: memoryResource, + Status: status, + IsVirtualCluster: cluster.IsVirtualCluster, + IsProd: cluster.IsProd, + } +} diff --git a/pkg/overview/util/DoraMetricUtils.go b/pkg/overview/util/DoraMetricUtils.go new file mode 100644 index 0000000000..b7e8d26813 --- /dev/null +++ b/pkg/overview/util/DoraMetricUtils.go @@ -0,0 +1,201 @@ +package util + +import ( + "time" + + "github.com/devtron-labs/devtron/pkg/overview/bean" +) + +func CreateDoraMetricObject(overallAverageValue float64, overallAverageUnit bean.MetricValueUnit, comparisonValue int, comparisonUnit bean.ComparisonUnit, performanceLevelCount *bean.PerformanceLevelCount) *bean.DoraMetric { + return &bean.DoraMetric{ + OverallAverage: &bean.MetricValue{ + Value: overallAverageValue, + Unit: overallAverageUnit.ToString(), + }, + ComparisonValue: comparisonValue, + ComparisonUnit: comparisonUnit, + PerformanceLevelCount: performanceLevelCount, + } +} + +// CalculateComparison calculates the comparison value between current and previous periods +// Returns percentage for DeploymentFrequency and ChangeFailureRate, minutes for MeanLeadTime and MeanTimeToRecovery +func CalculateComparison(current, previous float64, metricCategory bean.MetricCategory) int { + switch metricCategory { + case bean.MetricCategoryDeploymentFrequency, bean.MetricCategoryChangeFailureRate: + if previous == 0 { + if current > 0 { + return 100 // Return 100% increase when previous was 0 + } + return 0 + } + // Calculate percentage change for frequency and failure rate metrics + percentageChange := ((current - previous) / previous) * 100 + return int(percentageChange) + case 
bean.MetricCategoryMeanLeadTime, bean.MetricCategoryMeanTimeToRecovery: + if previous == 0 { + if current > 0 { + return int(current) + } + return 0 + } + // Calculate minutes difference for time-based metrics + return int(current - previous) + default: + return 0 + } +} + +// CalculatePerformanceLevelsForMetric calculates the count of pipelines in each performance category for a specific metric +func CalculatePerformanceLevelsForMetric(metricsData map[string]*bean.LensMetrics, metricCategory bean.MetricCategory) *bean.PerformanceLevelCount { + performanceLevels := &bean.PerformanceLevelCount{ + Elite: 0, + High: 0, + Medium: 0, + Low: 0, + } + + if len(metricsData) == 0 { + return performanceLevels + } + + // Categorize each app-env pair based on the specific metric + for _, lensMetrics := range metricsData { + var metricValue float64 + + // Get the appropriate metric value based on category + switch metricCategory { + case bean.MetricCategoryDeploymentFrequency: + metricValue = lensMetrics.AverageCycleTime + case bean.MetricCategoryMeanLeadTime: + metricValue = lensMetrics.AverageLeadTime + case bean.MetricCategoryChangeFailureRate: + metricValue = lensMetrics.ChangeFailureRate + case bean.MetricCategoryMeanTimeToRecovery: + metricValue = lensMetrics.AverageRecoveryTime + default: + // Default to low performance for unknown metric categories + performanceLevels.Low++ + continue + } + + // Categorize based on the specific metric thresholds + if IsInMetricCategory(metricValue, metricCategory, bean.PerformanceElite) { + performanceLevels.Elite++ + } else if IsInMetricCategory(metricValue, metricCategory, bean.PerformanceHigh) { + performanceLevels.High++ + } else if IsInMetricCategory(metricValue, metricCategory, bean.PerformanceMedium) { + performanceLevels.Medium++ + } else { + performanceLevels.Low++ + } + } + + return performanceLevels +} + +// IsInMetricCategory routes to the appropriate category checking function based on metric type +func IsInMetricCategory(value float64, metricCategory bean.MetricCategory, performanceCategory bean.PerformanceCategory) bool { + switch metricCategory { + case bean.MetricCategoryDeploymentFrequency: + return IsInDeploymentFrequencyCategory(value, performanceCategory) + case bean.MetricCategoryMeanLeadTime: + return IsInLeadTimeCategory(value, performanceCategory) + case bean.MetricCategoryChangeFailureRate: + return IsInChangeFailureRateCategory(value, performanceCategory) + case bean.MetricCategoryMeanTimeToRecovery: + return IsInRecoveryTimeCategory(value, performanceCategory) + default: + return false + } +} + +// IsInDeploymentFrequencyCategory checks deployment frequency thresholds +func IsInDeploymentFrequencyCategory(value float64, category bean.PerformanceCategory) bool { + switch category { + case bean.PerformanceElite: + return value >= 1.0 // On demand (multiple deploys per day) + case bean.PerformanceHigh: + return value >= 0.14 && value < 1.0 // Between once per day and once per week (1/7 ≈ 0.14) + case bean.PerformanceMedium: + return value >= 0.033 && value < 0.14 // Between once per week and once per month (1/30 ≈ 0.033) + case bean.PerformanceLow: + return value < 0.033 // Between once per month and once every six months + } + return false +} + +// IsInLeadTimeCategory checks change lead time thresholds (lower is better) +func IsInLeadTimeCategory(value float64, category bean.PerformanceCategory) bool { + switch category { + case bean.PerformanceElite: + return value < 24 // Less than one day + case bean.PerformanceHigh: + return value 
>= 24 && value <= 168 // Between one day and one week
+	case bean.PerformanceMedium:
+		return value > 168 && value <= 720 // Between one week and one month
+	case bean.PerformanceLow:
+		return value > 720 && value <= 4320 // Between one month and six months
+	}
+	return false
+}
+
+// IsInRecoveryTimeCategory checks failed deployment recovery time thresholds (lower is better)
+func IsInRecoveryTimeCategory(value float64, category bean.PerformanceCategory) bool {
+	switch category {
+	case bean.PerformanceElite:
+		return value < 1 // Less than one hour
+	case bean.PerformanceHigh:
+		return value >= 1 && value < 24 // Between one hour and one day
+	case bean.PerformanceMedium:
+		return value >= 24 && value < 168 // Between one day and one week
+	case bean.PerformanceLow:
+		return value >= 168 && value <= 720 // Between one week and one month
+	}
+	return false
+}
+
+// IsInChangeFailureRateCategory checks change failure rate thresholds (lower is better)
+func IsInChangeFailureRateCategory(value float64, category bean.PerformanceCategory) bool {
+	switch category {
+	case bean.PerformanceElite:
+		return value <= 5 // 5% or less
+	case bean.PerformanceHigh:
+		return value > 5 && value <= 10 // More than 5%, up to 10%
+	case bean.PerformanceMedium:
+		return value > 10 && value <= 20 // More than 10%, up to 20%
+	case bean.PerformanceLow:
+		return value > 20 && value <= 40 // More than 20%, up to 40%
+	}
+	return false
+}
+
+// CalculateAverageFromValues calculates average from a slice of float64 values
+func CalculateAverageFromValues(values []float64) float64 {
+	if len(values) == 0 {
+		return 0.0
+	}
+
+	var total float64
+	for _, value := range values {
+		total += value
+	}
+
+	return total / float64(len(values))
+}
+
+// CalculatePreviousPeriod calculates the previous period dates for comparison
+func CalculatePreviousPeriod(from, to *time.Time) (*time.Time, *time.Time) {
+	if from == nil || to == nil {
+		return nil, nil
+	}
+
+	// Calculate the duration of the current period
+	duration := to.Sub(*from)
+
+	// Previous period ends where current period starts, and starts duration before that
+	previousTo := *from
+	previousFrom := from.Add(-duration)
+
+	return &previousFrom, &previousTo
+}
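As written, a value is tested against each category in turn; for example, a 12.5% change failure rate lands in Medium (note that rates above 40% match none of the four buckets with these thresholds). An illustrative sketch:

for _, cat := range []bean.PerformanceCategory{
	bean.PerformanceElite, bean.PerformanceHigh, bean.PerformanceMedium, bean.PerformanceLow,
} {
	if util.IsInChangeFailureRateCategory(12.5, cat) {
		fmt.Println(cat) // prints "Medium"
		break
	}
}

diff --git a/pkg/overview/util/MathUtil.go b/pkg/overview/util/MathUtil.go
new file mode 100644
index 0000000000..acf4ec34d1
--- /dev/null
+++ b/pkg/overview/util/MathUtil.go
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2024. Devtron Inc.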
+ */
+
+package util
+
+import "math"
+
+// RoundToTwoDecimals rounds a float64 value to 2 decimal places (e.g., 72.054321 -> 72.05)
+func RoundToTwoDecimals(value float64) float64 {
+	return math.Round(value*100) / 100
+}
diff --git a/pkg/overview/util/TimeWindowUtil.go b/pkg/overview/util/TimeWindowUtil.go
new file mode 100644
index 0000000000..c42deebee9
--- /dev/null
+++ b/pkg/overview/util/TimeWindowUtil.go
@@ -0,0 +1,184 @@
+package util
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/devtron-labs/common-lib/utils"
+	"github.com/devtron-labs/devtron/pkg/overview/constants"
+)
+
+// TimeRange represents a time range with from and to timestamps
+type TimeRange struct {
+	From time.Time
+	To   time.Time
+}
+
+// ParseTimeString helper function to parse time string in ISO 8601 format
+func ParseTimeString(timeStr string) (time.Time, error) {
+	// Try parsing with different time formats
+	formats := []string{
+		time.RFC3339,     // "2006-01-02T15:04:05Z07:00"
+		time.RFC3339Nano, // "2006-01-02T15:04:05.999999999Z07:00"
+		"2006-01-02T15:04:05Z",
+		"2006-01-02T15:04:05.000Z",
+		"2006-01-02T15:04:05-07:00",
+		"2006-01-02T15:04:05.000-07:00",
+		"2006-01-02 15:04:05",
+		"2006-01-02",
+	}
+
+	for _, format := range formats {
+		if t, err := time.Parse(format, timeStr); err == nil {
+			return t, nil
+		}
+	}
+
+	return time.Time{}, fmt.Errorf("invalid time format: %s", timeStr)
+}
+
+// GetCurrentTimePeriodBasedOnTimeWindow parses the time-based filter request with timeWindow support using individual parameters
+func GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to string) (*utils.TimeRangeRequest, error) {
+	timeRangeReq := &utils.TimeRangeRequest{}
+	if len(timeWindow) > 0 {
+		timeWindowType := utils.TimeWindows(timeWindow)
+		timeRangeReq = utils.NewTimeWindowRequest(timeWindowType)
+	} else {
+		if len(from) == 0 || len(to) == 0 {
+			return nil, fmt.Errorf("either timeWindow or both from/to parameters must be provided")
+		}
+		var fromTime, toTime *time.Time
+		if parsedTime, err := ParseTimeString(from); err == nil {
+			fromTime = &parsedTime
+		} else {
+			return nil, fmt.Errorf("invalid 'from' time format: %s", from)
+		}
+		if parsedTime, err := ParseTimeString(to); err == nil {
+			toTime = &parsedTime
+		} else {
+			return nil, fmt.Errorf("invalid 'to' time format: %s", to)
+		}
+		timeRangeReq = utils.NewTimeRangeRequest(fromTime, toTime)
+	}
+	timeRange, err := timeRangeReq.ParseAndValidateTimeRange()
+	if err != nil {
+		return nil, err
+	}
+
+	return timeRange, nil
+}
+
+// calculatePreviousTimeRangeFromDuration calculates the previous time range based on current time range duration
+// currentFrom becomes the previous To, and prevFrom is calculated by subtracting the duration from currentFrom
+func calculatePreviousTimeRangeFromDuration(currentFrom, currentTo *time.Time) (*utils.TimeRangeRequest, error) {
+	if currentFrom == nil || currentTo == nil {
+		return nil, fmt.Errorf("currentFrom and currentTo cannot be nil")
+	}
+
+	// Calculate the duration between current from and to
+	duration := currentTo.Sub(*currentFrom)
+
+	// Previous To becomes current From
+	prevTo := *currentFrom
+
+	// Previous From is calculated by subtracting the duration from current From
+	prevFrom := currentFrom.Add(-duration)
+
+	// Create time range request for the calculated previous period
+	timeRangeReq := utils.NewTimeRangeRequest(&prevFrom, &prevTo)
+
+	// Parse and validate the time range
+	timeRange, err := timeRangeReq.ParseAndValidateTimeRange()
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse 
calculated previous time period: %w", err) + } + + return timeRange, nil +} + +// GetPreviousTimePeriodBasedOnTimeWindow calculates the previous from and to using the timeWindow key +// It maps current time windows to their previous equivalents and calls ParseAndValidateTimeRange +// For unknown timeWindows, it falls back to duration-based calculation using currentFrom and currentTo +func GetPreviousTimePeriodBasedOnTimeWindow(timeWindow string, currentFrom, currentTo *time.Time) (*utils.TimeRangeRequest, error) { + var previousTimeWindow utils.TimeWindows + + switch utils.TimeWindows(timeWindow) { + case utils.Today: + // If user provided today, use yesterday + previousTimeWindow = utils.Yesterday + case utils.Week: + // If user provided week, use lastWeek + previousTimeWindow = utils.LastWeek + case utils.Month: + // If user provided month, use lastMonth + previousTimeWindow = utils.LastMonth + case utils.Quarter: + // If user provided quarter, use lastQuarter + previousTimeWindow = utils.LastQuarter + default: + // Fallback to duration-based calculation for unknown timeWindows + return calculatePreviousTimeRangeFromDuration(currentFrom, currentTo) + } + + // Create time window request for the previous period + timeRangeReq := utils.NewTimeWindowRequest(previousTimeWindow) + + // Parse and validate the time range + timeRange, err := timeRangeReq.ParseAndValidateTimeRange() + if err != nil { + return nil, fmt.Errorf("failed to parse previous time period for timeWindow %s: %w", timeWindow, err) + } + + return timeRange, nil +} + +// GetCurrentAndPreviousTimeRangeBasedOnTimeWindow calculates and returns the current and previous time ranges based on a time window. +// It supports parsing and validating a time-based filter with optional "from" and "to" parameters and time window input. +// Returns two time range requests for the current and previous periods, or an error if parsing or validation fails. 
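+// Example (illustrative):
+//
+//	cur, prev, err := GetCurrentAndPreviousTimeRangeBasedOnTimeWindow("week", "", "")
+//	// cur covers the current week; prev covers the previous week for trend comparison.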
+func GetCurrentAndPreviousTimeRangeBasedOnTimeWindow(timeWindow, from, to string) (*utils.TimeRangeRequest, *utils.TimeRangeRequest, error) {
+	// Get current time range
+	currentFromTo, err := GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to parse current time period: %w", err)
+	}
+
+	// Get previous time range using current time range for fallback calculation
+	prevFromTo, err := GetPreviousTimePeriodBasedOnTimeWindow(timeWindow, currentFromTo.From, currentFromTo.To)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to parse previous time period: %w", err)
+	}
+
+	return currentFromTo, prevFromTo, nil
+}
+
+// GetTimePeriodFromTimeRange determines the time period based on the duration between from and to
+func GetTimePeriodFromTimeRange(from, to *time.Time) constants.TimePeriod {
+	if from == nil || to == nil {
+		return constants.ThisWeek // default
+	}
+
+	duration := to.Sub(*from)
+
+	// If the duration is approximately 1 day (within 2 hours tolerance)
+	if duration <= 26*time.Hour && duration >= 22*time.Hour {
+		return constants.Today
+	}
+
+	// If the duration is approximately 1 week (within 1 day tolerance)
+	if duration <= 8*24*time.Hour && duration >= 6*24*time.Hour {
+		return constants.ThisWeek
+	}
+
+	// If the duration is approximately 1 month (within 3 days tolerance)
+	if duration <= 33*24*time.Hour && duration >= 28*24*time.Hour {
+		return constants.ThisMonth
+	}
+
+	// If the duration is approximately 3 months (within 1 week tolerance)
+	if duration <= 97*24*time.Hour && duration >= 83*24*time.Hour {
+		return constants.ThisQuarter
+	}
+
+	// Default to week for other durations
+	return constants.ThisWeek
+}
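For instance, a seven-day range falls inside the one-week tolerance band; a minimal sketch, assuming this package is imported as util:

to := time.Now()
from := to.AddDate(0, 0, -7)
fmt.Println(util.GetTimePeriodFromTimeRange(&from, &to)) // prints "week" (constants.ThisWeek)

diff --git a/pkg/overview/util/TrendCalculator.go b/pkg/overview/util/TrendCalculator.go
new file mode 100644
index 0000000000..ee54819773
--- /dev/null
+++ b/pkg/overview/util/TrendCalculator.go
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2024. Devtron Inc.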
+ */ + +package util + +import ( + "time" + + "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + teamRepository "github.com/devtron-labs/devtron/pkg/team/repository" +) + +// TrendCalculator provides utility functions for calculating trend comparisons +type TrendCalculator struct{} + +// NewTrendCalculator creates a new instance of TrendCalculator +func NewTrendCalculator() *TrendCalculator { + return &TrendCalculator{} +} + +// CalculateTrendComparison calculates the trend comparison between current and previous period +func (tc *TrendCalculator) CalculateTrendComparison(currentValue, previousValue int, from, to *time.Time) *bean.TrendComparison { + timePeriod := GetTimePeriodFromTimeRange(from, to) + + if previousValue == 0 && currentValue == 0 { + return &bean.TrendComparison{ + Value: 0, + Label: tc.getTrendLabel(timePeriod), + } + } + + difference := currentValue - previousValue + return &bean.TrendComparison{ + Value: difference, + Label: tc.getTrendLabel(timePeriod), + } +} + +// CalculatePercentageTrendComparison calculates the trend comparison for percentage values +func (tc *TrendCalculator) CalculatePercentageTrendComparison(currentPercentage, previousPercentage float64, from, to *time.Time) *bean.TrendComparison { + timePeriod := GetTimePeriodFromTimeRange(from, to) + + difference := int(currentPercentage - previousPercentage) + return &bean.TrendComparison{ + Value: difference, + Label: tc.getTrendLabel(timePeriod), + } +} + +// GetPreviousPeriodTimeRange calculates the time range for the previous period based on the current period +// It simply subtracts the duration from the current period to get the previous period +func (tc *TrendCalculator) GetPreviousPeriodTimeRange(from, to *time.Time) (*time.Time, *time.Time) { + if from == nil || to == nil { + return nil, nil + } + + // Calculate the duration of the current period + duration := to.Sub(*from) + + // Previous period ends where current period starts, and starts duration before that + prevTo := *from + prevFrom := from.Add(-duration) + + return &prevFrom, &prevTo +} + +// getTrendLabel returns the appropriate label for the trend comparison +func (tc *TrendCalculator) getTrendLabel(timePeriod constants.TimePeriod) string { + switch timePeriod { + case constants.Today: + return "today" + case constants.ThisWeek: + return "this week" + case constants.ThisMonth: + return "this month" + case constants.ThisQuarter: + return "this quarter" + case constants.LastWeek: + return "last week" + case constants.LastMonth: + return "last month" + default: + return "this period" + } +} + +// CalculateTrendForTimeDataPoints calculates trend comparison for time-based data points +func (tc *TrendCalculator) CalculateTrendForTimeDataPoints(currentData, previousData []bean.TimeDataPoint, from, to *time.Time) *bean.TrendComparison { + currentTotal := 0 + for _, point := range currentData { + currentTotal += point.Count + } + + previousTotal := 0 + for _, point := range previousData { + previousTotal += point.Count + } + + return tc.CalculateTrendComparison(currentTotal, previousTotal, from, to) +} + +// CalculatePercentageFromCounts calculates percentage from counts +func (tc *TrendCalculator) CalculatePercentageFromCounts(numerator, denominator int) float64 { + if denominator 
== 0 { + return 0.0 + } + return (float64(numerator) / float64(denominator)) * 100.0 +} + +// CalculateAverageFromCounts calculates average from total and count +func (tc *TrendCalculator) CalculateAverageFromCounts(total, count int) float64 { + if count == 0 { + return 0.0 + } + return float64(total) / float64(count) +} + +// FilterTeamsByTimeRange filters teams by time range based on created_on field +func FilterTeamsByTimeRange(teams []teamRepository.Team, from, to *time.Time) []teamRepository.Team { + if from == nil && to == nil { + return teams + } + + filtered := make([]teamRepository.Team, 0) + for _, team := range teams { + if IsWithinTimeRange(team.CreatedOn, from, to) { + filtered = append(filtered, team) + } + } + return filtered +} + +// FilterAppsByTimeRange filters apps by time range based on created_on field +func FilterAppsByTimeRange(apps []*app.App, from, to *time.Time) []*app.App { + if from == nil && to == nil { + return apps + } + + filtered := make([]*app.App, 0) + for _, app := range apps { + if IsWithinTimeRange(app.CreatedOn, from, to) { + filtered = append(filtered, app) + } + } + return filtered +} + +// FilterEnvironmentsByTimeRange filters environments by time range based on created_on field +func FilterEnvironmentsByTimeRange(environments []*repository.Environment, from, to *time.Time) []*repository.Environment { + if from == nil && to == nil { + return environments + } + + filtered := make([]*repository.Environment, 0) + for _, env := range environments { + if IsWithinTimeRange(env.CreatedOn, from, to) { + filtered = append(filtered, env) + } + } + return filtered +} + +// FilterCiPipelinesByTimeRange filters CI pipelines by time range based on created_on field +func FilterCiPipelinesByTimeRange(pipelines []*pipelineConfig.CiPipeline, from, to *time.Time) []*pipelineConfig.CiPipeline { + if from == nil && to == nil { + return pipelines + } + + filtered := make([]*pipelineConfig.CiPipeline, 0) + for _, pipeline := range pipelines { + if IsWithinTimeRange(pipeline.CreatedOn, from, to) { + filtered = append(filtered, pipeline) + } + } + return filtered +} + +// FilterCdPipelinesByTimeRange filters CD pipelines by time range based on created_on field +func FilterCdPipelinesByTimeRange(pipelines []*pipelineConfig.Pipeline, from, to *time.Time) []*pipelineConfig.Pipeline { + if from == nil && to == nil { + return pipelines + } + + filtered := make([]*pipelineConfig.Pipeline, 0) + for _, pipeline := range pipelines { + if IsWithinTimeRange(pipeline.CreatedOn, from, to) { + filtered = append(filtered, pipeline) + } + } + return filtered +} + +// IsWithinTimeRange checks if a timestamp is within the given time range +func IsWithinTimeRange(timestamp time.Time, from, to *time.Time) bool { + if from != nil && timestamp.Before(*from) { + return false + } + if to != nil && timestamp.After(*to) { + return false + } + return true +} + +// CalculateAppTrendFromPeriodComparison calculates trend by comparing current and previous period app counts +func CalculateAppTrendFromPeriodComparison(currentApps, previousApps []*app.App) int { + currentCount := 0 + for _, app := range currentApps { + if app.Active { + currentCount++ + } + } + + previousCount := 0 + for _, app := range previousApps { + if app.Active { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateTeamTrendFromPeriodComparison calculates trend by comparing current and previous period team counts +func CalculateTeamTrendFromPeriodComparison(currentTeams, previousTeams 
[]teamRepository.Team) int { + currentCount := 0 + for _, team := range currentTeams { + if team.Active { + currentCount++ + } + } + + previousCount := 0 + for _, team := range previousTeams { + if team.Active { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateEnvironmentTrendFromPeriodComparison calculates trend by comparing current and previous period environment counts +func CalculateEnvironmentTrendFromPeriodComparison(currentEnvs, previousEnvs []*repository.Environment) int { + currentCount := 0 + for _, env := range currentEnvs { + if env.Active { + currentCount++ + } + } + + previousCount := 0 + for _, env := range previousEnvs { + if env.Active { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateCiPipelineTrendFromPeriodComparison calculates trend by comparing current and previous period CI pipeline counts +func CalculateCiPipelineTrendFromPeriodComparison(currentPipelines, previousPipelines []*pipelineConfig.CiPipeline) int { + currentCount := 0 + for _, pipeline := range currentPipelines { + if !pipeline.Deleted { + currentCount++ + } + } + + previousCount := 0 + for _, pipeline := range previousPipelines { + if !pipeline.Deleted { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateCdPipelineTrendFromPeriodComparison calculates trend by comparing current and previous period CD pipeline counts +func CalculateCdPipelineTrendFromPeriodComparison(currentPipelines, previousPipelines []*pipelineConfig.Pipeline) int { + currentCount := 0 + for _, pipeline := range currentPipelines { + if !pipeline.Deleted { + currentCount++ + } + } + + previousCount := 0 + for _, pipeline := range previousPipelines { + if !pipeline.Deleted { + previousCount++ + } + } + + return currentCount - previousCount +} diff --git a/pkg/overview/wire_overview.go b/pkg/overview/wire_overview.go new file mode 100644 index 0000000000..55590a9e82 --- /dev/null +++ b/pkg/overview/wire_overview.go @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package overview + +import ( + "github.com/devtron-labs/devtron/pkg/overview/cache" + "github.com/devtron-labs/devtron/pkg/overview/config" + "github.com/google/wire" +) + +// OverviewWireSet provides wire set for overview module +var OverviewWireSet = wire.NewSet( + config.GetClusterOverviewConfig, + + // Service layer + NewAppManagementServiceImpl, + wire.Bind(new(AppManagementService), new(*AppManagementServiceImpl)), + + NewDoraMetricsServiceImpl, + wire.Bind(new(DoraMetricsService), new(*DoraMetricsServiceImpl)), + + NewInsightsServiceImpl, + wire.Bind(new(InsightsService), new(*InsightsServiceImpl)), + + // Cluster cache service + cache.NewClusterCacheServiceImpl, + wire.Bind(new(cache.ClusterCacheService), new(*cache.ClusterCacheServiceImpl)), + + // Cluster overview service (uses background refresh worker) + NewClusterOverviewServiceImpl, + wire.Bind(new(ClusterOverviewService), new(*ClusterOverviewServiceImpl)), + + // Security overview service (uses existing image scanning repositories) + NewSecurityOverviewServiceImpl, + wire.Bind(new(SecurityOverviewService), new(*SecurityOverviewServiceImpl)), + + // Main overview service + NewOverviewServiceImpl, + wire.Bind(new(OverviewService), new(*OverviewServiceImpl)), +) diff --git a/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go b/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go index d384f30c3b..c2d661b32b 100644 --- a/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go +++ b/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go @@ -1,6 +1,8 @@ package repository import ( + "time" + "github.com/devtron-labs/devtron/pkg/pipeline/workflowStatus/bean" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" @@ -13,6 +15,7 @@ type WorkflowStageRepository interface { GetWorkflowStagesByWorkflowIdAndType(workflowId int, workflowType string) ([]*WorkflowExecutionStage, error) GetWorkflowStagesByWorkflowIdAndWtype(wfId int, wfType string) ([]*WorkflowExecutionStage, error) GetWorkflowStagesByWorkflowIdsAndWtype(wfIds []int, wfType string) ([]*WorkflowExecutionStage, error) + GetSuccessfulCIExecutionStages(from, to *time.Time) ([]*WorkflowExecutionStage, error) } type WorkflowStageRepositoryImpl struct { @@ -92,3 +95,22 @@ func (impl *WorkflowStageRepositoryImpl) GetWorkflowStagesByWorkflowIdsAndWtype( } return workflowStages, err } + +func (impl *WorkflowStageRepositoryImpl) GetSuccessfulCIExecutionStages(from, to *time.Time) ([]*WorkflowExecutionStage, error) { + var workflowStages []*WorkflowExecutionStage + err := impl.dbConnection.Model(&workflowStages). + Where("workflow_type = ?", "CI"). + Where("stage_name = ?", "Execution"). + Where("status = ?", "SUCCEEDED"). + Where("status_for = ?", "workflow"). + Where("start_time IS NOT NULL"). + Where("end_time IS NOT NULL"). + Where("created_on >= ? AND created_on <= ?", from, to). + Order("id ASC"). 
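		// Select() executes the accumulated go-pg query; because both
		// timestamps are guaranteed non-null by the filters above, callers
		// can safely derive a per-run duration as end_time - start_time.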
+ Select() + if err != nil { + impl.logger.Errorw("error in fetching successful CI execution stages", "err", err) + return workflowStages, err + } + return workflowStages, nil +} diff --git a/pkg/policyGovernance/security/imageScanning/ImageScanService.go b/pkg/policyGovernance/security/imageScanning/ImageScanService.go index f0268c20a8..1b04acecca 100644 --- a/pkg/policyGovernance/security/imageScanning/ImageScanService.go +++ b/pkg/policyGovernance/security/imageScanning/ImageScanService.go @@ -18,6 +18,10 @@ package imageScanning import ( "context" + "fmt" + "strings" + "time" + bean4 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/cluster/environment" @@ -29,8 +33,9 @@ import ( securityBean "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean" repository2 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool/repository" "github.com/devtron-labs/devtron/pkg/workflow/cd/read" + "github.com/devtron-labs/devtron/util/sliceUtil" "go.opentelemetry.io/otel" - "time" + "golang.org/x/exp/slices" "github.com/devtron-labs/devtron/internal/sql/repository" repository1 "github.com/devtron-labs/devtron/internal/sql/repository/app" @@ -49,6 +54,9 @@ type ImageScanService interface { VulnerabilityExposure(request *repository3.VulnerabilityRequest) (*repository3.VulnerabilityExposureListingResponse, error) GetArtifactVulnerabilityStatus(ctx context.Context, request *bean2.VulnerabilityCheckRequest) (bool, error) IsImageScanExecutionCompleted(image, imageDigest string) (bool, error) + FetchVulnerabilitySummary(ctx context.Context, request *bean3.VulnerabilitySummaryRequest, ids []int) (*bean3.VulnerabilitySummary, error) + FetchVulnerabilityListing(ctx context.Context, request *bean3.VulnerabilityListingRequest, ids []int) (*bean3.VulnerabilityListingResponse, error) + // resource scanning functions below GetScanResults(resourceScanQueryParams *bean3.ResourceScanQueryParams) (parser.ResourceScanResponseDto, error) FilterDeployInfoByScannedArtifactsDeployedInEnv(deployInfoList []*repository3.ImageScanDeployInfo) ([]*repository3.ImageScanDeployInfo, error) @@ -109,7 +117,44 @@ func (impl ImageScanServiceImpl) FetchAllDeployInfo(request *bean3.ImageScanRequ return deployedList, nil } -func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { +func (impl *ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { + + // Handle different scan status cases + if request.ScanStatus == securityBean.ScanStatusNotScanned { + // Show only not-scanned items + return impl.fetchNonScannedAppEnvListing(request, deployInfoIds) + } else if request.ScanStatus == securityBean.ScanStatusScanned { + // Show only scanned items + return impl.fetchScannedAppEnvListing(request, deployInfoIds) + } + + // ScanStatusAll (default: empty string) - show both scanned and not-scanned + // Fetch both scanned and not-scanned items and merge them + scannedResponse, err := impl.fetchScannedAppEnvListing(request, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching scanned items", "err", err) + return nil, err + } + + notScannedResponse, err := impl.fetchNonScannedAppEnvListing(request, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching not-scanned items", "err", 
err) + return nil, err + } + + // Merge the responses + mergedResponse := &bean3.ImageScanHistoryListingResponse{ + Offset: request.Offset, + Size: request.Size, + ImageScanHistoryResponse: append(scannedResponse.ImageScanHistoryResponse, notScannedResponse.ImageScanHistoryResponse...), + Total: scannedResponse.Total + notScannedResponse.Total, + } + + return mergedResponse, nil +} + +// fetchScannedAppEnvListing fetches scanned app-env combinations +func (impl *ImageScanServiceImpl) fetchScannedAppEnvListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { groupByList, err := impl.imageScanDeployInfoRepository.ScanListingWithFilter(&request.ImageScanFilter, request.Size, request.Offset, deployInfoIds) if err != nil { impl.Logger.Errorw("error while fetching scan execution result", "err", err) @@ -138,7 +183,6 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS groupByListMap[item.Id] = item executionHistoryIds = append(executionHistoryIds, item.ImageScanExecutionHistoryId...) } - // fetching all execution history in bulk for updating last check time in case when no vul are found(no results will be saved) mapOfExecutionHistoryIdVsLastExecTime, err := impl.fetchImageExecutionHistoryMapByIds(executionHistoryIds) if err != nil { @@ -147,6 +191,7 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS } var finalResponseList []*bean3.ImageScanHistoryResponse + for _, item := range groupByList { imageScanHistoryResponse := &bean3.ImageScanHistoryResponse{} var lastChecked time.Time @@ -155,6 +200,7 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS highCount := 0 moderateCount := 0 lowCount, unkownCount := 0, 0 + fixableCount := 0 imageScanDeployInfo := groupByListMap[item.Id] if imageScanDeployInfo != nil { scanResultList, err := impl.scanResultRepository.FetchByScanExecutionIds(imageScanDeployInfo.ImageScanExecutionHistoryId) @@ -170,6 +216,11 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS for _, item := range scanResultList { lastChecked = item.ImageScanExecutionHistory.ExecutionTime criticalCount, highCount, moderateCount, lowCount, unkownCount = impl.updateCount(item.CveStore.GetSeverity(), criticalCount, highCount, moderateCount, lowCount, unkownCount) + + // Count fixable vulnerabilities (those with a fixed version) + if item.FixedVersion != "" { + fixableCount++ + } } // updating in case when no vul are found (no results) if lastChecked.IsZero() && len(imageScanDeployInfo.ImageScanExecutionHistoryId) > 0 && mapOfExecutionHistoryIdVsLastExecTime != nil { @@ -192,6 +243,15 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS imageScanHistoryResponse.LastChecked = &lastChecked } imageScanHistoryResponse.SeverityCount = severityCount + imageScanHistoryResponse.FixableVulnerabilities = fixableCount + + // Set scan status based on whether it's scanned or not + if imageScanDeployInfo != nil && len(imageScanDeployInfo.ImageScanExecutionHistoryId) > 0 && imageScanDeployInfo.ImageScanExecutionHistoryId[0] != -1 { + imageScanHistoryResponse.ScanStatus = "scanned" + } else { + imageScanHistoryResponse.ScanStatus = "not-scanned" + } + if imageScanDeployInfo != nil { imageScanHistoryResponse.EnvId = imageScanDeployInfo.EnvId } @@ -245,6 +305,91 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS return finalResponse, err } +// fetchNonScannedAppEnvListing 
fetches non-scanned app-env combinations +// These are active deployments that don't have scan data in image_scan_deploy_info +func (impl *ImageScanServiceImpl) fetchNonScannedAppEnvListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { + // Get non-scanned app-env combinations + nonScannedList, err := impl.imageScanDeployInfoRepository.GetNonScannedAppEnvCombinations(&request.ImageScanFilter, request.Size, request.Offset, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching non-scanned app-env combinations", "err", err) + return nil, err + } + + // Get total count + totalCount, err := impl.imageScanDeployInfoRepository.GetNonScannedAppEnvCombinationsCount(&request.ImageScanFilter, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching non-scanned app-env combinations count", "err", err) + return nil, err + } + + // Build response list + var finalResponseList []*bean3.ImageScanHistoryResponse + + // Get app and environment details + appIds := make([]int, 0) + envIds := make([]int, 0) + for _, item := range nonScannedList { + appIds = append(appIds, item.ScanObjectMetaId) + envIds = append(envIds, item.EnvId) + } + + // Extract unique IDs to avoid duplicate queries + appIds = sliceUtil.GetUniqueElements(appIds) + envIds = sliceUtil.GetUniqueElements(envIds) + + // Fetch app details + appMap := make(map[int]string) + if len(appIds) > 0 { + apps, err := impl.appRepository.FindAppAndProjectByIdsIn(appIds) + if err != nil && err != pg.ErrNoRows { + impl.Logger.Errorw("error while fetching apps", "err", err) + return nil, err + } + for _, app := range apps { + appMap[app.Id] = app.AppName + } + } + + // Fetch environment details using lightweight method + envMap := make(map[int]string) + if len(envIds) > 0 { + envMap, err = impl.envService.FindNamesByIds(envIds) + if err != nil && err != pg.ErrNoRows { + impl.Logger.Errorw("error while fetching environments", "err", err) + return nil, err + } + } + + // Build response items + for _, item := range nonScannedList { + response := &bean3.ImageScanHistoryResponse{ + ImageScanDeployInfoId: -1, // No scan deploy info exists + AppId: item.ScanObjectMetaId, + EnvId: item.EnvId, + Name: appMap[item.ScanObjectMetaId], + Type: item.ObjectType, + Environment: envMap[item.EnvId], + ScanStatus: string(securityBean.ScanStatusNotScanned), + SeverityCount: &bean3.SeverityCount{ + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + }, + FixableVulnerabilities: 0, + } + finalResponseList = append(finalResponseList, response) + } + + return &bean3.ImageScanHistoryListingResponse{ + Offset: request.Offset, + Size: request.Size, + ImageScanHistoryResponse: finalResponseList, + Total: totalCount, + }, nil +} + func (impl ImageScanServiceImpl) fetchImageExecutionHistoryMapByIds(historyIds []int) (map[int]time.Time, error) { mapOfExecutionHistoryIdVsExecutionTime := make(map[int]time.Time) if len(historyIds) > 0 { @@ -782,3 +927,437 @@ func (impl ImageScanServiceImpl) fetchLatestArtifactMetadataDeployedOnAllEnvsAcr } return appEnvToCiArtifactMap, ciArtifactIdToScannedMap, nil } + +// FetchVulnerabilitySummary fetches the vulnerability summary for the given filters +// Same filters as VulnerabilityListing: Environment, Cluster, Application, Severity, Fix Availability, Vulnerability Age +// ids parameter contains RBAC-filtered deploy info IDs that the user has access to +func (impl *ImageScanServiceImpl) FetchVulnerabilitySummary(ctx context.Context, request 
*bean3.VulnerabilitySummaryRequest, ids []int) (*bean3.VulnerabilitySummary, error) { + ctx, span := otel.Tracer("imageScanService").Start(ctx, "FetchVulnerabilitySummary") + defer span.End() + + // Fetch raw vulnerability data with database-level filters (same as VulnerabilityListing) + // This applies: CVEName (empty for summary), Severity, EnvironmentIds, ClusterIds, AppIds, and RBAC-filtered deploy info IDs + rawData, err := impl.scanResultRepository.GetVulnerabilityRawData("", request.Severity, request.EnvironmentIds, request.ClusterIds, request.AppIds, ids) + if err != nil { + impl.Logger.Errorw("error while fetching vulnerability raw data", "err", err) + return nil, err + } + + if len(rawData) == 0 { + // Return empty summary + return &bean3.VulnerabilitySummary{ + TotalVulnerabilities: 0, + SeverityCount: &bean3.SeverityCount{ + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + }, + FixableVulnerabilities: 0, + NotFixableVulnerabilities: 0, + }, nil + } + + // Build vulnerability map (deduplicate by CVE + App + Env + Package + Version + FixedVersion) + // Same logic as VulnerabilityListing + vulnerabilityMap := make(map[string]*bean3.VulnerabilityDetail) + for _, data := range rawData { + // Create unique key for this CVE+App+Env+Package+Version+FixedVersion combination + key := fmt.Sprintf("%s|%d|%d|%s|%s|%s", data.CveStoreName, data.AppId, data.EnvId, data.Package, data.CurrentVersion, data.FixedVersion) + + // Convert severity int to string + severityStr := impl.convertSeverityEnumToString(data.Severity) + + if existing, exists := vulnerabilityMap[key]; exists { + // Keep the earliest discovery time + if data.ExecutionTime.Before(existing.DiscoveredAt) { + existing.DiscoveredAt = data.ExecutionTime + } + } else { + vulnerabilityMap[key] = &bean3.VulnerabilityDetail{ + CVEName: data.CveStoreName, + Severity: severityStr, + AppName: data.AppName, + AppId: data.AppId, + EnvName: data.EnvName, + EnvId: data.EnvId, + DiscoveredAt: data.ExecutionTime, + Package: data.Package, + CurrentVersion: data.CurrentVersion, + FixedVersion: data.FixedVersion, + } + } + } + + // Convert map to slice + vulnerabilities := make([]*bean3.VulnerabilityDetail, 0, len(vulnerabilityMap)) + for _, vuln := range vulnerabilityMap { + vulnerabilities = append(vulnerabilities, vuln) + } + + // Apply code-level filters (FixAvailable and VulnAge) + // Same logic as VulnerabilityListing + vulnerabilities = impl.applyVulnerabilitySummaryFilters(vulnerabilities, request) + + // Calculate summary from filtered vulnerabilities + totalVulnerabilities := len(vulnerabilities) + totalFixableVulnerabilities := 0 + totalNotFixableVulnerabilities := 0 + summaryCriticalCount := 0 + summaryHighCount := 0 + summaryModerateCount := 0 + summaryLowCount := 0 + summaryUnknownCount := 0 + + for _, vuln := range vulnerabilities { + // Count by severity + switch strings.ToLower(vuln.Severity) { + case "critical": + summaryCriticalCount++ + case "high": + summaryHighCount++ + case "medium", "moderate": + summaryModerateCount++ + case "low": + summaryLowCount++ + default: + summaryUnknownCount++ + } + + // Count fixable vs not fixable + if vuln.FixedVersion != "" { + totalFixableVulnerabilities++ + } else { + totalNotFixableVulnerabilities++ + } + } + + // Build and return vulnerability summary + vulnerabilitySummary := &bean3.VulnerabilitySummary{ + TotalVulnerabilities: totalVulnerabilities, + SeverityCount: &bean3.SeverityCount{ + Critical: summaryCriticalCount, + High: summaryHighCount, + Medium: summaryModerateCount, 
+		Low:      summaryLowCount,
+		Unknown:  summaryUnknownCount,
+	},
+		FixableVulnerabilities:    totalFixableVulnerabilities,
+		NotFixableVulnerabilities: totalNotFixableVulnerabilities,
+	}
+
+	return vulnerabilitySummary, nil
+}
+
+// FetchVulnerabilityListing fetches the vulnerability listing with pagination and filters
+// Optimized version: uses code-level aggregation instead of database GROUP BY
+func (impl *ImageScanServiceImpl) FetchVulnerabilityListing(ctx context.Context, request *bean3.VulnerabilityListingRequest, ids []int) (*bean3.VulnerabilityListingResponse, error) {
+	ctx, span := otel.Tracer("imageScanService").Start(ctx, "FetchVulnerabilityListing")
+	defer span.End()
+
+	rawData, err := impl.scanResultRepository.GetVulnerabilityRawData(request.CVEName, request.Severity, request.EnvironmentIds, request.ClusterIds, request.AppIds, ids)
+	if err != nil {
+		impl.Logger.Errorw("error while fetching vulnerability raw data", "err", err)
+		return nil, err
+	}
+
+	// Code-level aggregation using maps for O(n) performance
+	// Key: "cveName|appId|envId|package|version|fixedVersion"
+	// This ensures unique combinations of CVE+App+Env+Package+Version
+	vulnerabilityMap := make(map[string]*bean3.VulnerabilityDetail)
+
+	for _, data := range rawData {
+		// Create unique key for this CVE+App+Env+Package+Version combination
+		key := fmt.Sprintf("%s|%d|%d|%s|%s|%s", data.CveStoreName, data.AppId, data.EnvId, data.Package, data.CurrentVersion, data.FixedVersion)
+
+		// Check if this combination already exists
+		if existing, exists := vulnerabilityMap[key]; exists {
+			// Keep the earliest discovery time
+			if data.ExecutionTime.Before(existing.DiscoveredAt) {
+				existing.DiscoveredAt = data.ExecutionTime
+			}
+		} else {
+			// New combination - add to map
+			vulnerabilityMap[key] = &bean3.VulnerabilityDetail{
+				CVEName:        data.CveStoreName,
+				Severity:       impl.convertSeverityEnumToString(data.Severity),
+				AppName:        data.AppName,
+				AppId:          data.AppId,
+				EnvName:        data.EnvName,
+				EnvId:          data.EnvId,
+				DiscoveredAt:   data.ExecutionTime,
+				Package:        data.Package,
+				CurrentVersion: data.CurrentVersion,
+				FixedVersion:   data.FixedVersion,
+			}
+		}
+	}
+
+	// Convert map to slice
+	vulnerabilities := make([]*bean3.VulnerabilityDetail, 0, len(vulnerabilityMap))
+	for _, vuln := range vulnerabilityMap {
+		vulnerabilities = append(vulnerabilities, vuln)
+	}
+
+	// Apply code-level filters
+	vulnerabilities = impl.applyVulnerabilityFilters(vulnerabilities, request)
+
+	// Apply sorting based on request
+	impl.sortVulnerabilities(vulnerabilities, request.SortBy, request.SortOrder)
+
+	// Apply pagination in code
+	totalCount := len(vulnerabilities)
+	start := request.Offset
+	end := request.Offset + request.Size
+
+	// Handle edge cases: clamp the window into [0, totalCount] and keep
+	// start <= end so the slice expression below cannot panic on a
+	// negative offset or size
+	if start < 0 {
+		start = 0
+	}
+	if start > totalCount {
+		start = totalCount
+	}
+	if end > totalCount {
+		end = totalCount
+	}
+	if end < start {
+		end = start
+	}
+
+	// Slice for pagination
+	paginatedVulnerabilities := vulnerabilities[start:end]
+
+	return &bean3.VulnerabilityListingResponse{
+		Offset:          request.Offset,
+		Size:            request.Size,
+		Total:           totalCount,
+		Vulnerabilities: paginatedVulnerabilities,
+	}, nil
+}
+
+// applyVulnerabilityFilters applies code-level filters (fix availability and vulnerability age)
+func (impl *ImageScanServiceImpl) applyVulnerabilityFilters(vulnerabilities []*bean3.VulnerabilityDetail, request *bean3.VulnerabilityListingRequest) []*bean3.VulnerabilityDetail {
+	filtered := make([]*bean3.VulnerabilityDetail, 0, len(vulnerabilities))
+	now := time.Now()
+
+	for _, vuln := range vulnerabilities
{ + // Apply fix availability filter (multi-select) + if len(request.FixAvailability) > 0 { + hasFixedVersion := vuln.FixedVersion != "" + matchesFilter := false + + for _, fixAvailType := range request.FixAvailability { + if fixAvailType == bean3.FixAvailable && hasFixedVersion { + matchesFilter = true + break + } + if fixAvailType == bean3.FixNotAvailable && !hasFixedVersion { + matchesFilter = true + break + } + } + + if !matchesFilter { + continue + } + } + + // Apply vulnerability age filter (multi-select) + if len(request.AgeOfDiscovery) > 0 { + daysSinceDiscovery := int(now.Sub(vuln.DiscoveredAt).Hours() / 24) + matchesAgeFilter := false + + for _, ageType := range request.AgeOfDiscovery { + switch ageType { + case bean3.VulnAgeLessThan30Days: + if daysSinceDiscovery < 30 { + matchesAgeFilter = true + } + case bean3.VulnAge30To60Days: + if daysSinceDiscovery >= 30 && daysSinceDiscovery < 60 { + matchesAgeFilter = true + } + case bean3.VulnAge60To90Days: + if daysSinceDiscovery >= 60 && daysSinceDiscovery < 90 { + matchesAgeFilter = true + } + case bean3.VulnAgeMoreThan90Days: + if daysSinceDiscovery >= 90 { + matchesAgeFilter = true + } + } + + if matchesAgeFilter { + break + } + } + + if !matchesAgeFilter { + continue + } + } + + filtered = append(filtered, vuln) + } + + return filtered +} + +// applyVulnerabilitySummaryFilters applies code-level filters for summary (fix availability and vulnerability age) +// Same logic as applyVulnerabilityFilters but for VulnerabilitySummaryRequest +func (impl *ImageScanServiceImpl) applyVulnerabilitySummaryFilters(vulnerabilities []*bean3.VulnerabilityDetail, request *bean3.VulnerabilitySummaryRequest) []*bean3.VulnerabilityDetail { + filtered := make([]*bean3.VulnerabilityDetail, 0, len(vulnerabilities)) + now := time.Now() + + for _, vuln := range vulnerabilities { + // Apply fix availability filter (multi-select) + if len(request.FixAvailability) > 0 { + hasFixedVersion := vuln.FixedVersion != "" + matchesFilter := false + + for _, fixAvailType := range request.FixAvailability { + if fixAvailType == bean3.FixAvailable && hasFixedVersion { + matchesFilter = true + break + } + if fixAvailType == bean3.FixNotAvailable && !hasFixedVersion { + matchesFilter = true + break + } + } + + if !matchesFilter { + continue + } + } + + // Apply vulnerability age filter (multi-select) + if len(request.AgeOfDiscovery) > 0 { + daysSinceDiscovery := int(now.Sub(vuln.DiscoveredAt).Hours() / 24) + matchesAgeFilter := false + + for _, ageType := range request.AgeOfDiscovery { + switch ageType { + case bean3.VulnAgeLessThan30Days: + if daysSinceDiscovery < 30 { + matchesAgeFilter = true + } + case bean3.VulnAge30To60Days: + if daysSinceDiscovery >= 30 && daysSinceDiscovery < 60 { + matchesAgeFilter = true + } + case bean3.VulnAge60To90Days: + if daysSinceDiscovery >= 60 && daysSinceDiscovery < 90 { + matchesAgeFilter = true + } + case bean3.VulnAgeMoreThan90Days: + if daysSinceDiscovery >= 90 { + matchesAgeFilter = true + } + } + + if matchesAgeFilter { + break + } + } + + if !matchesAgeFilter { + continue + } + } + + filtered = append(filtered, vuln) + } + + return filtered +} + +// sortVulnerabilities sorts vulnerabilities based on sortBy and sortOrder +func (impl *ImageScanServiceImpl) sortVulnerabilities(vulnerabilities []*bean3.VulnerabilityDetail, sortBy bean3.VulnerabilitySortBy, sortOrder bean3.SortOrder) { + // Default sort: discoveredAt DESC, cveName ASC + if sortBy == "" { + sortBy = bean3.VulnSortByDiscoveredAt + } + if sortOrder == "" { + sortOrder = 
bean3.SortOrderDesc + } + + slices.SortFunc(vulnerabilities, func(a, b *bean3.VulnerabilityDetail) int { + var cmp int + + switch sortBy { + case bean3.VulnSortByCveName: + if a.CVEName < b.CVEName { + cmp = -1 + } else if a.CVEName > b.CVEName { + cmp = 1 + } + case bean3.VulnSortByCurrentVersion: + if a.CurrentVersion < b.CurrentVersion { + cmp = -1 + } else if a.CurrentVersion > b.CurrentVersion { + cmp = 1 + } + case bean3.VulnSortByFixedVersion: + if a.FixedVersion < b.FixedVersion { + cmp = -1 + } else if a.FixedVersion > b.FixedVersion { + cmp = 1 + } + case bean3.VulnSortByDiscoveredAt: + if a.DiscoveredAt.Before(b.DiscoveredAt) { + cmp = -1 + } else if a.DiscoveredAt.After(b.DiscoveredAt) { + cmp = 1 + } + case bean3.VulnSortBySeverity: + // Severity comparison: Critical > High > Medium > Low > Unknown + severityOrder := map[string]int{ + securityBean.CRITICAL: 4, + securityBean.HIGH: 3, + securityBean.MEDIUM: 2, + securityBean.LOW: 1, + securityBean.UNKNOWN: 0, + } + aSev := severityOrder[a.Severity] + bSev := severityOrder[b.Severity] + if aSev < bSev { + cmp = -1 + } else if aSev > bSev { + cmp = 1 + } + default: + // Default to discoveredAt + if a.DiscoveredAt.Before(b.DiscoveredAt) { + cmp = -1 + } else if a.DiscoveredAt.After(b.DiscoveredAt) { + cmp = 1 + } + } + + // Apply sort order + if sortOrder == bean3.SortOrderDesc { + cmp = -cmp + } + + return cmp + }) +} + +// convertSeverityEnumToString converts severity enum to string +func (impl *ImageScanServiceImpl) convertSeverityEnumToString(severity int) string { + switch severity { + case int(securityBean.Low): + return securityBean.LOW + case int(securityBean.Medium): + return securityBean.MEDIUM + case int(securityBean.High): + return securityBean.HIGH + case int(securityBean.Critical): + return securityBean.CRITICAL + case int(securityBean.Safe): + return securityBean.SAFE + case int(securityBean.Unknown): + return securityBean.UNKNOWN + default: + return securityBean.UNKNOWN + } +} diff --git a/pkg/policyGovernance/security/imageScanning/bean/bean.go b/pkg/policyGovernance/security/imageScanning/bean/bean.go index b250f8aff5..702386a817 100644 --- a/pkg/policyGovernance/security/imageScanning/bean/bean.go +++ b/pkg/policyGovernance/security/imageScanning/bean/bean.go @@ -1,10 +1,12 @@ package bean import ( + "time" + + workflowConstants "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/constants" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/helper/parser" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean" - "time" ) const ( @@ -75,15 +77,111 @@ type ImageScanHistoryListingResponse struct { } type ImageScanHistoryResponse struct { - ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` - AppId int `json:"appId"` - EnvId int `json:"envId"` - Name string `json:"name"` - Type string `json:"type"` - Environment string `json:"environment"` - LastChecked *time.Time `json:"lastChecked"` - Image string `json:"image,omitempty"` - SeverityCount *SeverityCount `json:"severityCount,omitempty"` + ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` + AppId int `json:"appId"` + EnvId int `json:"envId"` + Name string `json:"name"` + Type string `json:"type"` + Environment string `json:"environment"` + LastChecked *time.Time `json:"lastChecked"` + Image string `json:"image,omitempty"` + SeverityCount *SeverityCount 
`json:"severityCount,omitempty"` + FixableVulnerabilities int `json:"fixableVulnerabilities"` + ScanStatus string `json:"scanStatus,omitempty"` // "scanned" or "not-scanned" +} + +// VulnerabilitySummary represents the summary of all vulnerabilities across all scanned apps/envs +type VulnerabilitySummary struct { + TotalVulnerabilities int `json:"totalVulnerabilities"` + SeverityCount *SeverityCount `json:"severityCount"` + FixableVulnerabilities int `json:"fixableVulnerabilities"` + NotFixableVulnerabilities int `json:"notFixableVulnerabilities"` +} + +// VulnerabilitySummaryRequest represents the request for vulnerability summary with filters +// Same filters as VulnerabilityListingRequest (except pagination and sorting) +type VulnerabilitySummaryRequest struct { + EnvironmentIds []int `json:"envIds"` // Filter by environment IDs + ClusterIds []int `json:"clusterIds"` // Filter by cluster IDs + AppIds []int `json:"appIds"` // Filter by application IDs + Severity []int `json:"severity"` // Filter by severity + FixAvailability []FixAvailabilityType `json:"fixAvailability"` // Filter by fix availability (multi-select: fixAvailable, fixNotAvailable) + AgeOfDiscovery []VulnerabilityAgeType `json:"ageOfDiscovery"` // Filter by vulnerability age (multi-select) +} + +// VulnerabilityListingRequest represents the request for vulnerability listing with filters +type VulnerabilityListingRequest struct { + CVEName string `json:"cveName"` // Search by CVE name + Severity []int `json:"severity"` // Filter by severity + EnvironmentIds []int `json:"envIds"` // Filter by environment IDs + ClusterIds []int `json:"clusterIds"` // Filter by cluster IDs + AppIds []int `json:"appIds"` // Filter by application IDs + FixAvailability []FixAvailabilityType `json:"fixAvailability"` // Filter by fix availability (multi-select: fixAvailable, fixNotAvailable) + AgeOfDiscovery []VulnerabilityAgeType `json:"ageOfDiscovery"` // Filter by vulnerability age (multi-select) + SortBy VulnerabilitySortBy `json:"sortBy"` // Sort by field + SortOrder SortOrder `json:"sortOrder"` // Sort order (ASC/DESC) + Offset int `json:"offset"` // Pagination offset + Size int `json:"size"` // Pagination size +} + +// FixAvailabilityType represents fix availability filter options +type FixAvailabilityType string + +const ( + FixAvailable FixAvailabilityType = "fixAvailable" // CVEs with fixes available + FixNotAvailable FixAvailabilityType = "fixNotAvailable" // CVEs without fixes +) + +// VulnerabilityAgeType represents vulnerability age filter +type VulnerabilityAgeType string + +const ( + VulnAgeLessThan30Days VulnerabilityAgeType = "lt_30d" // Less than 30 days old + VulnAge30To60Days VulnerabilityAgeType = "30_60d" // 30 to 60 days old + VulnAge60To90Days VulnerabilityAgeType = "60_90d" // 60 to 90 days old + VulnAgeMoreThan90Days VulnerabilityAgeType = "gt_90d" // More than 90 days old +) + +// VulnerabilitySortBy represents sort field for vulnerability listing +type VulnerabilitySortBy string + +const ( + VulnSortByCveName VulnerabilitySortBy = "cveName" + VulnSortByCurrentVersion VulnerabilitySortBy = "currentVersion" + VulnSortByFixedVersion VulnerabilitySortBy = "fixedVersion" + VulnSortByDiscoveredAt VulnerabilitySortBy = "discoveredAt" + VulnSortBySeverity VulnerabilitySortBy = "severity" +) + +// SortOrder represents sort order +// Type alias to repository constants to avoid circular imports +type SortOrder = workflowConstants.SortOrder + +const ( + SortOrderAsc = workflowConstants.SortOrderAsc + SortOrderDesc = 
workflowConstants.SortOrderDesc +) + +// VulnerabilityListingResponse represents the response for vulnerability listing +type VulnerabilityListingResponse struct { + Offset int `json:"offset"` + Size int `json:"size"` + Total int `json:"total"` + Vulnerabilities []*VulnerabilityDetail `json:"list"` +} + +// VulnerabilityDetail represents detailed information about a single CVE +type VulnerabilityDetail struct { + CVEName string `json:"cveName"` + Severity string `json:"severity"` + AppName string `json:"appName"` + AppId int `json:"appId"` + EnvName string `json:"envName"` + EnvId int `json:"envId"` + DiscoveredAt time.Time `json:"discoveredAt"` // First time this CVE was discovered + Package string `json:"package"` // Vulnerable package name + CurrentVersion string `json:"currentVersion"` // Current vulnerable version + FixedVersion string `json:"fixedVersion"` // Fixed version (empty if not fixable) } type ImageScanExecutionDetail struct { diff --git a/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go b/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go index 58f6dce98f..e91768cb3e 100644 --- a/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go +++ b/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go @@ -65,6 +65,11 @@ type ImageScanListingResponse struct { TotalCount int `json:"totalCount"` } +type DeploymentScannedCount struct { + ScannedCount int + UnscannedCount int +} + type ImageScanDeployInfoRepository interface { Save(model *ImageScanDeployInfo) error FindAll() ([]*ImageScanDeployInfo, error) @@ -75,6 +80,13 @@ type ImageScanDeployInfoRepository interface { FetchByAppIdAndEnvId(appId int, envId int, objectType []string) (*ImageScanDeployInfo, error) FindByTypeMetaAndTypeId(scanObjectMetaId int, objectType string) (*ImageScanDeployInfo, error) ScanListingWithFilter(request *repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanListingResponse, error) + + // Security Overview methods + GetActiveDeploymentCountByFilters(envIds, clusterIds, appIds []int) (int, error) + GetActiveDeploymentCountWithVulnerabilitiesByFilters(envIds, clusterIds, appIds []int) (int, error) + GetActiveDeploymentScannedUnscannedCountByFilters(envIds, clusterIds, appIds []int) (*DeploymentScannedCount, error) + GetNonScannedAppEnvCombinations(request *repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanDeployInfo, error) + GetNonScannedAppEnvCombinationsCount(request *repoBean.ImageScanFilter, deployInfoIds []int) (int, error) } type ImageScanDeployInfoRepositoryImpl struct { @@ -289,3 +301,335 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListingQueryBuilder(request *r } return query, queryParams } + +// ============================================================================ +// Security Overview Methods +// ============================================================================ + +// GetActiveDeploymentCountByFilters returns the count of unique active deployments (app+env combinations) +// filtered by envIds, clusterIds, and appIds +// Uses cd_workflow_runner as the source of truth for ALL deployments (scanned and unscanned) +func (impl ImageScanDeployInfoRepositoryImpl) GetActiveDeploymentCountByFilters(envIds, clusterIds, appIds []int) (int, error) { + // Query to find latest deployment per app+environment combination from cd_workflow_runner + // This is the source of truth for ALL 
active deployments, not just scanned ones + // Partitions by (app_id, environment_id) to get the most recent deployment for each app+env + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Complete the CTE and count unique app+env combinations + query += ` + ) + SELECT COUNT(DISTINCT (app_id, environment_id)) + FROM LatestDeployments + WHERE rn = 1 + ` + + var count int + _, err := impl.dbConnection.Query(&count, query, queryParams...) + if err != nil { + impl.logger.Errorw("error in getting active deployment count", "err", err) + return 0, err + } + + return count, nil +} + +// GetActiveDeploymentCountWithVulnerabilitiesByFilters returns the count of unique active deployments +// that have vulnerabilities in their LATEST deployed artifact +func (impl ImageScanDeployInfoRepositoryImpl) GetActiveDeploymentCountWithVulnerabilitiesByFilters(envIds, clusterIds, appIds []int) (int, error) { + // Query to find latest deployment per app+environment combination and check if it has vulnerabilities + // Partitions by (app_id, environment_id) to get the most recent deployment for each app+env + // This handles cases where pipelines are deleted and recreated for the same app+env + // Shows vulnerability data from all deployments (successful or failed) since vulnerability is about the image, not deployment status + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + cia.image, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Complete the CTE and count deployments with vulnerabilities + // Join with image_scan_deploy_info to verify scanned deployments + // Then join with image_scan_execution_history using both id and image for verification + query += ` + ) + SELECT COUNT(DISTINCT (ld.app_id, ld.environment_id)) + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = 
ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = iseh.id + WHERE ld.rn = 1 + AND isdi.image_scan_execution_history_id[1] != -1 + ` + + var count int + _, err := impl.dbConnection.Query(&count, query, queryParams...) + if err != nil { + impl.logger.Errorw("error in getting deployment count with vulnerabilities", "err", err) + return 0, err + } + + return count, nil +} + +// GetActiveDeploymentScannedUnscannedCountByFilters returns the count of scanned and unscanned deployments +// in a single query for optimal performance. It finds the latest deployed artifact per app+env combination +// and counts how many have scanned=true vs scanned=false +func (impl ImageScanDeployInfoRepositoryImpl) GetActiveDeploymentScannedUnscannedCountByFilters(envIds, clusterIds, appIds []int) (*DeploymentScannedCount, error) { + // Query to find latest deployment per app+environment combination and count scanned vs unscanned + // Uses ROW_NUMBER() to get the latest deployment per app+env + // Partitions by (app_id, environment_id) to get the most recent deployment for each app+env + // This handles cases where pipelines are deleted and recreated for the same app+env + // Shows scan data from all deployments (successful or failed) since scan status is about the image, not deployment status + // Then uses conditional aggregation to count scanned and unscanned in one query + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + cia.scanned, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + INNER JOIN environment env ON env.id = p.environment_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + query += ` + ) + SELECT + COUNT(*) FILTER (WHERE scanned = true) as scanned_count, + COUNT(*) FILTER (WHERE scanned = false) as unscanned_count + FROM LatestDeployments + WHERE rn = 1 + ` + + type queryResult struct { + ScannedCount int `pg:"scanned_count"` + UnscannedCount int `pg:"unscanned_count"` + } + + var result queryResult + _, err := impl.dbConnection.Query(&result, query, queryParams...) 
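+	// go-pg maps the aliased FILTER aggregates onto queryResult through the
+	// pg struct tags declared above, so both counts arrive in one round trip.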
+ if err != nil { + impl.logger.Errorw("error in getting deployment scanned/unscanned counts", "err", err) + return nil, err + } + + return &DeploymentScannedCount{ + ScannedCount: result.ScannedCount, + UnscannedCount: result.UnscannedCount, + }, nil +} + +// GetNonScannedAppEnvCombinations returns app-env combinations that are NOT scanned +// It finds all active deployments and excludes those that exist in image_scan_deploy_info +func (impl ImageScanDeployInfoRepositoryImpl) GetNonScannedAppEnvCombinations(request *repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanDeployInfo, error) { + query, queryParams := impl.buildNonScannedAppEnvQuery(request, size, offset, deployInfoIds, false) + + var results []*ImageScanDeployInfo + _, err := impl.dbConnection.Query(&results, query, queryParams...) + if err != nil { + impl.logger.Errorw("error in getting non-scanned app-env combinations", "err", err) + return nil, err + } + + return results, nil +} + +// GetNonScannedAppEnvCombinationsCount returns count of app-env combinations that are NOT scanned +func (impl ImageScanDeployInfoRepositoryImpl) GetNonScannedAppEnvCombinationsCount(request *repoBean.ImageScanFilter, deployInfoIds []int) (int, error) { + query, queryParams := impl.buildNonScannedAppEnvQuery(request, 0, 0, deployInfoIds, true) + + var count int + _, err := impl.dbConnection.Query(&count, query, queryParams...) + if err != nil { + impl.logger.Errorw("error in getting non-scanned app-env combinations count", "err", err) + return 0, err + } + + return count, nil +} + +// buildNonScannedAppEnvQuery builds query to find non-scanned app-env combinations +// It gets all active deployments from cd_workflow_runner and excludes those in image_scan_deploy_info +func (impl ImageScanDeployInfoRepositoryImpl) buildNonScannedAppEnvQuery(request *repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int, isCountQuery bool) (string, []interface{}) { + var queryParams []interface{} + + // Build the CTE to get latest deployments + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + // Add filters to CTE + if len(request.EnvironmentIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(request.EnvironmentIds)) + } + + if len(request.ClusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(request.ClusterIds)) + } + + query += ` + ) + ` + + // Main query - select non-scanned app-env combinations + if isCountQuery { + query += ` + SELECT COUNT(*) + FROM LatestDeployments ld + INNER JOIN app a ON a.id = ld.app_id + INNER JOIN environment env ON env.id = ld.environment_id + LEFT JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + WHERE ld.rn = 1 + AND a.active = true + AND env.active = true + AND (isdi.id IS NULL OR isdi.image_scan_execution_history_id[1] = -1) + ` + } else { + query += ` + SELECT + ld.app_id as scan_object_meta_id, + ld.environment_id as env_id, + ld.cluster_id, + 'app' as object_type, + -1 as id 
+	FROM LatestDeployments ld
+	INNER JOIN app a ON a.id = ld.app_id
+	INNER JOIN environment env ON env.id = ld.environment_id
+	LEFT JOIN image_scan_deploy_info isdi
+		ON isdi.scan_object_meta_id = ld.app_id
+		AND isdi.env_id = ld.environment_id
+		AND isdi.object_type = 'app'
+	WHERE ld.rn = 1
+	AND a.active = true
+	AND env.active = true
+	AND (isdi.id IS NULL OR isdi.image_scan_execution_history_id[1] = -1)
+	`
+	}
+
+	// Add app name filter if provided
+	if len(request.AppName) > 0 {
+		query += " AND a.app_name ILIKE ?"
+		queryParams = append(queryParams, util.GetLIKEClauseQueryParam(request.AppName))
+	}
+
+	// Note: the deployInfoIds (RBAC) filter is intentionally not applied here.
+	// Non-scanned items have no rows in image_scan_deploy_info, so that filter
+	// is only applicable to scanned items.
+
+	// Add pagination for non-count queries
+	if !isCountQuery && size > 0 {
+		query += " ORDER BY ld.app_id, ld.environment_id LIMIT ? OFFSET ?"
+		queryParams = append(queryParams, size, offset)
+	}
+
+	return query, queryParams
+}
diff --git a/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go b/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go
index 06458de34c..7251f57dfa 100644
--- a/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go
+++ b/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go
@@ -17,6 +17,8 @@
 package repository
 
 import (
+	"time"
+
 	"github.com/go-pg/pg"
 	"go.uber.org/zap"
 )
@@ -37,6 +39,51 @@ type ImageScanExecutionResult struct {
 	ImageScanExecutionHistory ImageScanExecutionHistory
 }
 
+type VulnerabilityData struct {
+	CveStoreName string
+	FixedVersion string
+}
+
+type SeverityInsightData struct {
+	CveStoreName  string
+	Severity      int       // Severity enum value from cve_store
+	ExecutionTime time.Time // From image_scan_execution_history
+}
+
+type VulnerabilityTrendData struct {
+	CveStoreName  string
+	Severity      int       // Severity enum value from cve_store
+	ExecutionTime time.Time // From image_scan_execution_history
+}
+
+type VulnerabilityListingData struct {
+	CveStoreName   string    `sql:"cve_store_name"`
+	Severity       int       `sql:"severity"`
+	AppId          int       `sql:"app_id"`
+	AppName        string    `sql:"app_name"`
+	EnvId          int       `sql:"env_id"`
+	EnvName        string    `sql:"env_name"`
+	DiscoveredAt   time.Time `sql:"discovered_at"`
+	Package        string    `sql:"package"`
+	CurrentVersion string    `sql:"current_version"`
+	FixedVersion   string    `sql:"fixed_version"`
+	TotalCount     int       `sql:"total_count"`
+}
+
+// VulnerabilityRawData represents raw CVE data before aggregation (for code-level optimization)
+type VulnerabilityRawData struct {
+	CveStoreName   string    `sql:"cve_store_name"`
+	Severity       int       `sql:"severity"`
+	AppId          int       `sql:"app_id"`
+	AppName        string    `sql:"app_name"`
+	EnvId          int       `sql:"env_id"`
+	EnvName        string    `sql:"env_name"`
+	ExecutionTime  time.Time `sql:"execution_time"`
+	Package        string    `sql:"package"`
+	CurrentVersion string    `sql:"current_version"`
+	FixedVersion   string    `sql:"fixed_version"`
+}
+
 type ImageScanResultRepository interface {
 	Save(model *ImageScanExecutionResult) error
 	FindAll() ([]*ImageScanExecutionResult, error)
@@ -48,6 +95,14 @@ type ImageScanResultRepository interface {
 	FindByImageDigest(imageDigest string) ([]*ImageScanExecutionResult, error)
 	FindByImageDigests(digest []string) ([]*ImageScanExecutionResult, error)
 	FindByImage(image string) ([]*ImageScanExecutionResult, error)
+
+	//
Security Overview methods + GetVulnerabilitiesWithFixedVersionByFilters(envIds, clusterIds, appIds []int) ([]*VulnerabilityData, error) + GetSeverityInsightDataByFilters(envIds, clusterIds, appIds []int, isProd *bool) ([]*SeverityInsightData, error) + GetVulnerabilityTrendDataByFilters(from, to *time.Time, isProd *bool) ([]*VulnerabilityTrendData, error) + + // Vulnerability Listing + GetVulnerabilityRawData(cveName string, severities, envIds, clusterIds, appIds, deployInfoIds []int) ([]*VulnerabilityRawData, error) } type ImageScanResultRepositoryImpl struct { @@ -133,3 +188,348 @@ func (impl ImageScanResultRepositoryImpl) FindByImage(image string) ([]*ImageSca Where("image_scan_execution_history.image = ?", image).Order("image_scan_execution_history.execution_time desc").Select() return model, err } + +// ============================================================================ +// Security Overview Methods +// ============================================================================ + +func (impl ImageScanResultRepositoryImpl) GetVulnerabilitiesWithFixedVersionByFilters(envIds, clusterIds, appIds []int) ([]*VulnerabilityData, error) { + var results []*VulnerabilityData + + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + cia.image, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Complete the CTE and join with image_scan_deploy_info to get only scanned deployments + // Then fetch vulnerabilities from image_scan_execution_result + query += ` + ) + SELECT + iser.cve_store_name, + iser.fixed_version + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = isdi.image_scan_execution_history_id[1] + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + WHERE ld.image = iseh.image + AND ld.rn = 1 + AND isdi.image_scan_execution_history_id[1] != -1 + ` + + _, err := impl.dbConnection.Query(&results, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in getting vulnerabilities with fixed version", "err", err) + return nil, err + } + + return results, nil +} + +// GetSeverityInsightDataByFilters returns vulnerability data with severity and execution time +// for calculating severity distribution and age distribution in a single query +// Only returns vulnerabilities from the LATEST deployed artifact for each app+env combination +// isProd: nil = all environments, true = prod only, false = non-prod only +func (impl ImageScanResultRepositoryImpl) GetSeverityInsightDataByFilters(envIds, clusterIds, appIds []int, isProd *bool) ([]*SeverityInsightData, error) { + var results []*SeverityInsightData + + // Query to get vulnerabilities from latest deployed images per app+env + // Step 1: Get latest deployment per app+env from cd_workflow_runner (source of truth for all deployments) + // Step 2: Join with image_scan_deploy_info to verify if this app+env has scanned image deployed + // image_scan_deploy_info contains env_id mapping and scan_execution_history_id for scanned images + // For object_type='app', the array image_scan_execution_history_id has length 1 (current deployed image's scan) + // Step 3: Get execution_time from image_scan_execution_history for age distribution + // Step 4: Fetch vulnerabilities with severity from image_scan_execution_result + // Images without scan data (not in image_scan_deploy_info) will not appear in results (zero vulnerabilities) + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + cia.image, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add prod/non-prod filter only if isProd is not nil + if isProd != nil { + query += " AND env.default = ?" + queryParams = append(queryParams, *isProd) + } + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Complete the CTE and join with image_scan_deploy_info to get only scanned deployments + // Then fetch vulnerabilities with severity and execution_time + query += ` + ) + SELECT + iser.cve_store_name, + COALESCE(cs.standard_severity, cs.severity) as severity, + iseh.execution_time + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = iseh.id + INNER JOIN cve_store cs ON cs.name = iser.cve_store_name + WHERE ld.rn = 1 + AND isdi.image_scan_execution_history_id[1] != -1 + ` + + _, err := impl.dbConnection.Query(&results, query, queryParams...) 
+	if err != nil {
+		impl.logger.Errorw("error in getting severity insight data", "err", err, "isProd", isProd)
+		return nil, err
+	}
+
+	return results, nil
+}
+
+// GetVulnerabilityTrendDataByFilters returns vulnerability data with severity and execution time
+// for calculating a time-series vulnerability trend grouped by severity
+// Only returns vulnerabilities from the LATEST deployed artifact for each app+env combination
+// isProd: nil = all environments, true = prod only, false = non-prod only
+func (impl ImageScanResultRepositoryImpl) GetVulnerabilityTrendDataByFilters(from, to *time.Time, isProd *bool) ([]*VulnerabilityTrendData, error) {
+	var results []*VulnerabilityTrendData
+
+	// Resolves the latest deployed artifact per app+env exactly as in GetSeverityInsightDataByFilters
+	// (see the step-by-step comment there); additionally filters by execution_time range for trend analysis.
+	// Images without scan data (not in image_scan_deploy_info) will not appear in results (zero vulnerabilities)
+	query := `
+	WITH LatestDeployments AS (
+		SELECT
+			p.app_id,
+			p.environment_id,
+			env.cluster_id,
+			cia.image,
+			ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn
+		FROM cd_workflow_runner cwr
+		INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id
+		INNER JOIN pipeline p ON p.id = cw.pipeline_id
+		INNER JOIN environment env ON env.id = p.environment_id
+		INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id
+		WHERE cwr.workflow_type = 'DEPLOY'
+		AND p.deleted = false
+		AND env.active = true
+	`
+
+	var queryParams []interface{}
+
+	// Add prod/non-prod filter only if isProd is not nil
+	if isProd != nil {
+		query += " AND env.default = ?"
+		queryParams = append(queryParams, *isProd)
+	}
+
+	// Complete the CTE and join with image_scan_deploy_info to get only scanned deployments
+	// Then fetch vulnerabilities with severity and execution_time
+	query += `
+	)
+	SELECT
+		iser.cve_store_name,
+		COALESCE(cs.standard_severity, cs.severity) as severity,
+		iseh.execution_time
+	FROM LatestDeployments ld
+	INNER JOIN image_scan_deploy_info isdi
+		ON isdi.scan_object_meta_id = ld.app_id
+		AND isdi.env_id = ld.environment_id
+		AND isdi.object_type = 'app'
+	INNER JOIN image_scan_execution_history iseh
+		ON iseh.id = isdi.image_scan_execution_history_id[1]
+		AND iseh.image = ld.image
+	INNER JOIN image_scan_execution_result iser
+		ON iser.image_scan_execution_history_id = iseh.id
+	INNER JOIN cve_store cs ON cs.name = iser.cve_store_name
+	WHERE ld.rn = 1
+	AND isdi.image_scan_execution_history_id[1] != -1
+	`
+
+	// from/to arrive as pointers; guard against nil so a missing bound does not
+	// silently compare execution_time with NULL and drop every row
+	if from != nil {
+		query += " AND iseh.execution_time >= ?"
+		queryParams = append(queryParams, from)
+	}
+	if to != nil {
+		query += " AND iseh.execution_time <= ?"
+		queryParams = append(queryParams, to)
+	}
+
+	_, err := impl.dbConnection.Query(&results, query, queryParams...)
+ if err != nil { + impl.logger.Errorw("error in getting vulnerability trend data", "err", err, "from", from, "to", to, "isProd", isProd) + return nil, err + } + + return results, nil +} + +func (impl ImageScanResultRepositoryImpl) GetVulnerabilityRawData(cveName string, severities, envIds, clusterIds, appIds, deployInfoIds []int) ([]*VulnerabilityRawData, error) { + var results []*VulnerabilityRawData + + query := ` + WITH LatestDeployments AS ( + SELECT DISTINCT ON (p.app_id, p.environment_id) + p.app_id, + a.app_name, + p.environment_id, + env.environment_name as env_name, + env.cluster_id, + cia.image + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN app a ON a.id = p.app_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND a.active = true + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + query += ` + ORDER BY p.app_id, p.environment_id, cwr.id DESC + ) + SELECT + iser.cve_store_name, + COALESCE(cs.standard_severity, cs.severity) as severity, + ld.app_id, + ld.app_name, + ld.environment_id as env_id, + ld.env_name, + iseh.execution_time, + iser.package, + iser.version as current_version, + iser.fixed_version + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + AND isdi.image_scan_execution_history_id[1] != -1 + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = iseh.id + ` + + // Add CVE name filter + if cveName != "" { + query += " AND iser.cve_store_name ILIKE ?" + queryParams = append(queryParams, "%"+cveName+"%") + } + + query += ` + INNER JOIN cve_store cs ON cs.name = iser.cve_store_name + ` + + // Add RBAC filter for deploy info IDs + if len(deployInfoIds) > 0 { + query += " WHERE isdi.id = ANY(?)" + queryParams = append(queryParams, pg.Array(deployInfoIds)) + + // Add severity filter with AND since WHERE already exists + if len(severities) > 0 { + query += " AND COALESCE(cs.standard_severity, cs.severity) = ANY(?)" + queryParams = append(queryParams, pg.Array(severities)) + } + } else { + // Add severity filter with WHERE since no deploy info filter + if len(severities) > 0 { + query += " WHERE COALESCE(cs.standard_severity, cs.severity) = ANY(?)" + queryParams = append(queryParams, pg.Array(severities)) + } + } + + _, err := impl.dbConnection.Query(&results, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in getting vulnerability raw data", "err", err, "cveName", cveName, "severities", severities) + return nil, err + } + + return results, nil +} diff --git a/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go b/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go index be4d20a578..9469c12b8a 100644 --- a/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go +++ b/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go @@ -112,12 +112,13 @@ type ImageScanFilter struct { CVEName string `json:"cveName"` AppName string `json:"appName"` // ObjectName deprecated - ObjectName string `json:"objectName"` - EnvironmentIds []int `json:"envIds"` - ClusterIds []int `json:"clusterIds"` - Severity []int `json:"severity"` - SortOrder SortOrder `json:"sortOrder"` - SortBy SortBy `json:"sortBy"` // sort by objectName,envName,lastChecked + ObjectName string `json:"objectName"` + EnvironmentIds []int `json:"envIds"` + ClusterIds []int `json:"clusterIds"` + Severity []int `json:"severity"` + SortOrder SortOrder `json:"sortOrder"` + SortBy SortBy `json:"sortBy"` // sort by objectName,envName,lastChecked + ScanStatus ScanStatusType `json:"scanStatus,omitempty"` } type SortBy string @@ -127,3 +128,12 @@ const ( Asc SortOrder = "ASC" Desc SortOrder = "DESC" ) + +// ScanStatusType represents the scan status filter +type ScanStatusType string + +const ( + ScanStatusAll ScanStatusType = "" // default - show all (scanned + not-scanned) + ScanStatusScanned ScanStatusType = "scanned" + ScanStatusNotScanned ScanStatusType = "not-scanned" +) diff --git a/pkg/team/repository/TeamRepository.go b/pkg/team/repository/TeamRepository.go index d3e0695956..6947024455 100644 --- a/pkg/team/repository/TeamRepository.go +++ b/pkg/team/repository/TeamRepository.go @@ -17,6 +17,8 @@ package repository import ( + "time" + "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" ) @@ -32,6 +34,7 @@ type Team struct { type TeamRepository interface { Save(team *Team) error FindAllActive() ([]Team, error) + FindAllActiveInTimeRange(from, to *time.Time) ([]Team, error) FindOne(id int) (Team, error) FindByTeamName(name string) (Team, error) Update(team *Team) error @@ -108,3 +111,18 @@ func (impl TeamRepositoryImpl) FindByIds(ids []*int) ([]*Team, error) { func (impl TeamRepositoryImpl) GetConnection() *pg.DB { return impl.dbConnection } + +func (impl TeamRepositoryImpl) FindAllActiveInTimeRange(from, to *time.Time) ([]Team, error) { + var teams []Team + query := impl.dbConnection.Model(&teams).Where("active = ?", true) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + err := query.Select() + return teams, err +} diff --git a/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go index b7434bb273..74b7480a33 100644 --- a/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go +++ b/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,12 @@ const ( INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = "COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = 
"COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = "COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +185,8 @@ var natsTopicMapping = map[string]NatsTopic{ INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: COST_MODULE_GPU_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +229,8 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git a/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 0000000000..3727650148 --- /dev/null +++ b/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package utils
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+type TimeRangeRequest struct {
+	From       *time.Time   `json:"from" schema:"from"`
+	To         *time.Time   `json:"to" schema:"to"`
+	TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter year lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"`
+}
+
+func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest {
+	return &TimeRangeRequest{
+		From: from,
+		To:   to,
+	}
+}
+
+func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest {
+	return &TimeRangeRequest{
+		TimeWindow: &timeWindow,
+	}
+}
+
+// TimeWindows is a string type that represents different time windows
+type TimeWindows string
+
+func (timeRange TimeWindows) String() string {
+	return string(timeRange)
+}
+
+// Define constants for different time windows
+const (
+	Today       TimeWindows = "today"
+	Yesterday   TimeWindows = "yesterday"
+	Week        TimeWindows = "week"
+	Month       TimeWindows = "month"
+	Quarter     TimeWindows = "quarter"
+	LastWeek    TimeWindows = "lastWeek"
+	LastMonth   TimeWindows = "lastMonth"
+	Year        TimeWindows = "year"
+	LastQuarter TimeWindows = "lastQuarter"
+	Last24Hours TimeWindows = "last24Hours"
+	Last7Days   TimeWindows = "last7Days"
+	Last30Days  TimeWindows = "last30Days"
+	Last90Days  TimeWindows = "last90Days"
+)
+
+func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) {
+	if timeRange == nil {
+		return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time range request. either from/to or timeWindow must be provided")
+	}
+	now := time.Now()
+	// If timeWindow is provided, it takes preference over from/to
+	if timeRange.TimeWindow != nil {
+		switch *timeRange.TimeWindow {
+		case Today:
+			start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
+			return NewTimeRangeRequest(&start, &now), nil
+		case Yesterday:
+			start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour)
+			end := start.Add(24 * time.Hour)
+			return NewTimeRangeRequest(&start, &end), nil
+		case Week:
+			// Current week (Monday to Sunday)
+			weekday := int(now.Weekday())
+			if weekday == 0 { // Sunday
+				weekday = 7
+			}
+			// Truncate(24h) rounds relative to UTC, not the local zone, so build local midnight explicitly
+			monday := now.AddDate(0, 0, -(weekday - 1))
+			start := time.Date(monday.Year(), monday.Month(), monday.Day(), 0, 0, 0, 0, now.Location())
+			return NewTimeRangeRequest(&start, &now), nil
+		case Month:
+			start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location())
+			return NewTimeRangeRequest(&start, &now), nil
+		case Quarter:
+			quarter := ((int(now.Month()) - 1) / 3) + 1
+			quarterStart := time.Month((quarter-1)*3 + 1)
+			start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location())
+			return NewTimeRangeRequest(&start, &now), nil
+		case LastWeek:
+			weekday := int(now.Weekday())
+			if weekday == 0 { // Sunday
+				weekday = 7
+			}
+			monday := now.AddDate(0, 0, -(weekday - 1))
+			thisWeekStart := time.Date(monday.Year(), monday.Month(), monday.Day(), 0, 0, 0, 0, now.Location())
+			lastWeekStart := thisWeekStart.AddDate(0, 0, -7)
+			lastWeekEnd := thisWeekStart.Add(-time.Second)
+			return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil
+		case LastMonth:
+			thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location())
+			lastMonthStart := thisMonthStart.AddDate(0, -1, 0)
+			lastMonthEnd := thisMonthStart.Add(-time.Second)
+			return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil
+		case LastQuarter:
+			// Calculate current quarter
+			currentQuarter := ((int(now.Month()) - 1) / 3) + 1
+
+			// Calculate previous quarter; Q1 rolls back to Q4 of the previous year
+			var prevQuarter, prevYear int
+			if currentQuarter == 1 {
+				prevQuarter = 4
+				prevYear = now.Year() - 1
+			} else {
+				prevQuarter = currentQuarter - 1
+				prevYear = now.Year()
+			}
+
+			// Start of previous quarter
+			prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1)
+			prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location())
+
+			// End of previous quarter is the start of the current quarter minus 1 second
+			// (for Q1 that start is January 1st, i.e. the end of Q4 of last year)
+			currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1)
+			currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location())
+			prevQuarterEnd := currentQuarterStart.Add(-time.Second)
+
+			return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil
+		case Year:
+			start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location())
+			return NewTimeRangeRequest(&start, &now), nil
+		case Last24Hours:
+			start := now.Add(-24 * time.Hour)
+			return NewTimeRangeRequest(&start, &now), nil
+		case Last7Days:
+			start := now.AddDate(0, 0, -7)
+			return NewTimeRangeRequest(&start, &now), nil
+		case Last30Days:
+			start := now.AddDate(0, 0, -30)
+			return NewTimeRangeRequest(&start, &now), nil
+		case Last90Days:
+			start := now.AddDate(0, 0, -90)
+			return NewTimeRangeRequest(&start, &now), nil
+		default:
+			return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow)
+		}
+	}
+
+	// Use from/to dates if provided
+	if timeRange.From != nil && timeRange.To != nil {
+		if timeRange.From.After(*timeRange.To) {
+			return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date")
+		}
+		return NewTimeRangeRequest(timeRange.From, timeRange.To), nil
+	} else {
+		return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided")
+	}
+}
+
+// TimeBoundariesRequest represents the request for time boundary frames
+type TimeBoundariesRequest struct {
+	TimeWindowBoundaries []string     `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"`
+	TimeWindow           *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year
+	Iterations           int          `json:"iterations" schema:"iterations" validate:"omitempty,min=1"`
+}
+
+// TimeWindowBoundaries represents the start and end times for a time window
+type TimeWindowBoundaries struct {
+	StartTime time.Time
+	EndTime   time.Time
+}
+
+func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) {
+	if timeBoundaries == nil {
+		return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request")
+	}
+	// If timeWindow is provided, it takes preference over timeWindowBoundaries
+	if timeBoundaries.TimeWindow != nil {
+		switch *timeBoundaries.TimeWindow {
+		case Week:
+			return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil
+		case Month:
+			return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil
+		case Quarter:
+			return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil
+		case Year:
+			return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil
+		default:
+			return
[]TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := start.AddDate(0, 3, 0) + // For the current quarter, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := 
strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f721..2bbfa1dc10 100644 --- a/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/vendor/github.com/devtron-labs/common-lib/utils/reflectUtils/ReflectUtil.go b/vendor/github.com/devtron-labs/common-lib/utils/reflectUtils/ReflectUtil.go new file mode 100644 index 0000000000..60de9d97f9 --- /dev/null +++ b/vendor/github.com/devtron-labs/common-lib/utils/reflectUtils/ReflectUtil.go @@ -0,0 +1,14 @@ +package reflectUtils + +import "reflect" + +func IsNullableValue(field reflect.Value) bool { + kind := field.Kind() + switch kind { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.UnsafePointer, + reflect.Interface, reflect.Slice: + return true + default: //other types can not be nil + return false + } +} diff --git a/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go b/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go index fa6858e5de..5c9cb23bfd 100644 --- a/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go +++ b/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go @@ -34,7 +34,7 @@ type Config struct { User string `env:"PG_USER" envDefault:"" description:"user for postgres" example:"postgres"` Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-" description:"password for postgres, associated with PG_USER" example:"confidential ;)"` Database string `env:"PG_DATABASE" envDefault:"orchestrator" description:"postgres database to be made connection with" example:"orchestrator, casbin, git_sensor, lens"` - CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin""` + CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` ApplicationName string `env:"APP" envDefault:"orchestrator" description:"Application name"` ReadTimeout int64 `env:"PG_READ_TIMEOUT" envDefault:"30"` WriteTimeout int64 `env:"PG_WRITE_TIMEOUT" envDefault:"30"` @@ -71,10 +71,10 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { _, err := dbConnection.QueryOne(&test, `SELECT 1`) if err != nil { - logger.Errorw("error in connecting db ", "db", obfuscateSecretTags(cfg), "err", err) + logger.Errorw("error in connecting db ", "db", ObfuscateSecretTags(cfg), "err", err) return nil, err } else { - logger.Infow("connected with db", "db", 
obfuscateSecretTags(cfg)) + logger.Infow("connected with db", "db", ObfuscateSecretTags(cfg)) } // -------------- @@ -82,7 +82,7 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { return dbConnection, err } -func obfuscateSecretTags(cfg interface{}) interface{} { +func ObfuscateSecretTags(cfg interface{}) interface{} { cfgDpl := reflect.New(reflect.ValueOf(cfg).Elem().Type()).Interface() cfgDplElm := reflect.ValueOf(cfgDpl).Elem() diff --git a/vendor/github.com/devtron-labs/common-lib/workerPool/workerpool.go b/vendor/github.com/devtron-labs/common-lib/workerPool/workerpool.go new file mode 100644 index 0000000000..5750d6e1d9 --- /dev/null +++ b/vendor/github.com/devtron-labs/common-lib/workerPool/workerpool.go @@ -0,0 +1,116 @@ +package workerPool + +import ( + "fmt" + "github.com/devtron-labs/common-lib/constants" + "github.com/devtron-labs/common-lib/pubsub-lib/metrics" + "github.com/devtron-labs/common-lib/utils/reflectUtils" + "github.com/devtron-labs/common-lib/utils/runTime" + "github.com/gammazero/workerpool" + "go.uber.org/zap" + "reflect" + "runtime/debug" + "sync" +) + +type WorkerPool[T any] struct { + logger *zap.SugaredLogger + service constants.ServiceName + wp *workerpool.WorkerPool + mu *sync.Mutex + err chan error + response []T + includeZeroValue bool +} + +func NewWorkerPool[T any](maxWorkers int, serviceName constants.ServiceName, logger *zap.SugaredLogger) *WorkerPool[T] { + wp := &WorkerPool[T]{ + logger: logger, + service: serviceName, + wp: workerpool.New(maxWorkers), + mu: &sync.Mutex{}, + err: make(chan error, 1), + } + return wp +} + +func (wp *WorkerPool[T]) InitializeResponse() *WorkerPool[T] { + wp.response = []T{} + return wp +} + +func (wp *WorkerPool[T]) IncludeZeroValue() *WorkerPool[T] { + wp.includeZeroValue = true + return wp +} + +func (wp *WorkerPool[T]) Submit(task func() (T, error)) { + if task == nil { + return + } + wp.wp.Submit(func() { + defer func() { + if r := recover(); r != nil { + metrics.IncPanicRecoveryCount("go-routine", wp.service.ToString(), runTime.GetCallerFunctionName(), fmt.Sprintf("%s:%d", runTime.GetCallerFileName(), runTime.GetCallerLineNumber())) + wp.logger.Errorw(fmt.Sprintf("%s %s", constants.GoRoutinePanicMsgLogPrefix, "go-routine recovered from panic"), "err", r, "stack", string(debug.Stack())) + } + }() + if wp.Error() != nil { + return + } + res, err := task() + if err != nil { + wp.logger.Errorw("error in worker pool task", "err", err) + wp.setError(err) + return + } + wp.updateResponse(res) + }) +} + +func (wp *WorkerPool[T]) updateResponse(res T) { + wp.lock() + defer wp.unlock() + val := reflect.ValueOf(res) + if reflectUtils.IsNullableValue(val) && val.IsNil() { + return + } else if !wp.includeZeroValue && val.IsZero() { + return + } else { + wp.response = append(wp.response, res) + return + } +} + +func (wp *WorkerPool[_]) StopWait() error { + wp.wp.StopWait() + // return error from workerPool error channel + return wp.Error() +} + +func (wp *WorkerPool[_]) lock() { + wp.mu.Lock() +} + +func (wp *WorkerPool[_]) unlock() { + wp.mu.Unlock() +} + +func (wp *WorkerPool[_]) Error() error { + select { + case err := <-wp.err: + return err + default: + return nil + } +} + +func (wp *WorkerPool[_]) setError(err error) { + if err != nil && wp.Error() == nil { + wp.err <- err + } +} + +func (wp *WorkerPool[T]) GetResponse() []T { + return wp.response +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 36aab7c6df..b0b3c76f93 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ 
-523,7 +523,7 @@ github.com/davecgh/go-spew/spew # github.com/deckarep/golang-set v1.8.0 ## explicit; go 1.17 github.com/deckarep/golang-set -# github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251121075820-d6692a4fd1f2 +# github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251201122208-2efa348401af ## explicit; go 1.24.0 github.com/devtron-labs/authenticator/apiToken github.com/devtron-labs/authenticator/client @@ -531,7 +531,7 @@ github.com/devtron-labs/authenticator/jwt github.com/devtron-labs/authenticator/middleware github.com/devtron-labs/authenticator/oidc github.com/devtron-labs/authenticator/password -# github.com/devtron-labs/common-lib v0.18.1-0.20241001061923-eda545dc839e => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251121075820-d6692a4fd1f2 +# github.com/devtron-labs/common-lib v0.18.1-0.20241001061923-eda545dc839e => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251201122208-2efa348401af ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/blob-storage @@ -559,6 +559,7 @@ github.com/devtron-labs/common-lib/utils/k8s/commonBean github.com/devtron-labs/common-lib/utils/k8s/configMap github.com/devtron-labs/common-lib/utils/k8s/health github.com/devtron-labs/common-lib/utils/k8sObjectsUtil +github.com/devtron-labs/common-lib/utils/reflectUtils github.com/devtron-labs/common-lib/utils/registry github.com/devtron-labs/common-lib/utils/remoteConnection/bean github.com/devtron-labs/common-lib/utils/retryFunc @@ -566,6 +567,7 @@ github.com/devtron-labs/common-lib/utils/runTime github.com/devtron-labs/common-lib/utils/sql github.com/devtron-labs/common-lib/utils/workFlow github.com/devtron-labs/common-lib/utils/yaml +github.com/devtron-labs/common-lib/workerPool github.com/devtron-labs/common-lib/workflow # github.com/devtron-labs/go-bitbucket v0.9.60-beta ## explicit; go 1.14 @@ -2673,5 +2675,5 @@ xorm.io/xorm/log xorm.io/xorm/names xorm.io/xorm/schemas xorm.io/xorm/tags -# github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251121075820-d6692a4fd1f2 -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251121075820-d6692a4fd1f2 +# github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251201122208-2efa348401af +# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251201122208-2efa348401af diff --git a/wire_gen.go b/wire_gen.go index c099099491..10b9cebeca 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -233,6 +233,9 @@ import ( "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/module/store" "github.com/devtron-labs/devtron/pkg/notifier" + "github.com/devtron-labs/devtron/pkg/overview" + "github.com/devtron-labs/devtron/pkg/overview/cache" + config5 "github.com/devtron-labs/devtron/pkg/overview/config" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/draftAwareConfigService" "github.com/devtron-labs/devtron/pkg/pipeline/executors" @@ -1101,7 +1104,20 @@ func InitializeApp() (*App, error) { userResourceExtendedServiceImpl := userResource.NewUserResourceExtendedServiceImpl(sugaredLogger, teamServiceImpl, environmentServiceImpl, 
appCrudOperationServiceImpl, chartGroupServiceImpl, appListingServiceImpl, appWorkflowServiceImpl, k8sApplicationServiceImpl, clusterServiceImplExtended, commonEnforcementUtilImpl, enforcerUtilImpl, enforcerImpl) restHandlerImpl := userResource2.NewUserResourceRestHandler(sugaredLogger, userServiceImpl, userResourceExtendedServiceImpl) routerImpl := userResource2.NewUserResourceRouterImpl(restHandlerImpl) - muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, deploymentConfigurationRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl, devtronResourceRouterImpl, fluxApplicationRouterImpl, scanningResultRouterImpl, routerImpl) + appManagementServiceImpl := overview.NewAppManagementServiceImpl(sugaredLogger, appRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowRepositoryImpl, environmentRepositoryImpl, teamRepositoryImpl, workflowStageRepositoryImpl, repositoryImpl) + doraMetricsServiceImpl := overview.NewDoraMetricsServiceImpl(sugaredLogger, lensClientImpl, appRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, cdWorkflowRepositoryImpl) + insightsServiceImpl := overview.NewInsightsServiceImpl(sugaredLogger, appRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowRepositoryImpl, environmentRepositoryImpl) + clusterCacheServiceImpl := cache.NewClusterCacheServiceImpl(sugaredLogger) + clusterOverviewConfig, err := config5.GetClusterOverviewConfig() + if err != nil { + return nil, err + } + clusterOverviewServiceImpl := overview.NewClusterOverviewServiceImpl(sugaredLogger, clusterServiceImplExtended, k8sCapacityServiceImpl, clusterCacheServiceImpl, k8sCommonServiceImpl, enforcerImpl, clusterOverviewConfig) + securityOverviewServiceImpl := overview.NewSecurityOverviewServiceImpl(sugaredLogger, imageScanResultRepositoryImpl, imageScanDeployInfoRepositoryImpl, cveStoreRepositoryImpl, ciPipelineRepositoryImpl, cdWorkflowRepositoryImpl) + overviewServiceImpl := overview.NewOverviewServiceImpl(appManagementServiceImpl, doraMetricsServiceImpl, insightsServiceImpl, clusterOverviewServiceImpl, clusterCacheServiceImpl, securityOverviewServiceImpl) + overviewRestHandlerImpl := restHandler.NewOverviewRestHandlerImpl(sugaredLogger, overviewServiceImpl, 
userServiceImpl, validate, enforcerImpl) + overviewRouterImpl := router.NewOverviewRouterImpl(overviewRestHandlerImpl) + muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, deploymentConfigurationRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl, devtronResourceRouterImpl, fluxApplicationRouterImpl, scanningResultRouterImpl, routerImpl, overviewRouterImpl) loggingMiddlewareImpl := util4.NewLoggingMiddlewareImpl(userServiceImpl) cdWorkflowServiceImpl := cd.NewCdWorkflowServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) webhookServiceImpl := pipeline.NewWebhookServiceImpl(ciArtifactRepositoryImpl, sugaredLogger, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowCommonServiceImpl, workFlowStageStatusServiceImpl, ciServiceImpl)
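
The sketches below illustrate how the APIs added in this diff are meant to be consumed; they are not part of the patch, and wiring names such as `repo` are placeholders. First, the new TimeRangeRequest parser in common-lib, whose resolved bounds feed the new trend repository method; a minimal sketch:

package main

import (
	"fmt"

	"github.com/devtron-labs/common-lib/utils"
)

func main() {
	// A named window takes preference over explicit from/to values.
	resolved, err := utils.NewTimeWindowRequest(utils.Last90Days).ParseAndValidateTimeRange()
	if err != nil {
		fmt.Println("invalid range:", err)
		return
	}
	fmt.Println("trend window:", resolved.From, "->", resolved.To)

	// The resolved bounds plug straight into the new repository method, e.g.:
	//   isProd := true
	//   trend, err := repo.GetVulnerabilityTrendDataByFilters(resolved.From, resolved.To, &isProd)
}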
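
TimeBoundariesRequest is the frame-oriented counterpart: either a named window with an iteration count, or explicit "<start>|<end>" RFC3339 pairs. A sketch under the same caveats:

package main

import (
	"fmt"

	"github.com/devtron-labs/common-lib/utils"
)

func main() {
	// Rolling frames: the current quarter plus the three before it, newest first;
	// the current frame is clipped at time.Now().
	window := utils.Quarter
	frames, err := (&utils.TimeBoundariesRequest{TimeWindow: &window, Iterations: 4}).ParseAndValidateTimeBoundaries()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, frame := range frames {
		fmt.Println(frame.StartTime, "->", frame.EndTime)
	}

	// Explicit boundaries are pipe-separated RFC3339 pairs; start must not be after end.
	explicit := &utils.TimeBoundariesRequest{
		TimeWindowBoundaries: []string{"2024-01-01T00:00:00Z|2024-03-31T23:59:59Z"},
	}
	if _, err = explicit.ParseAndValidateTimeBoundaries(); err != nil {
		fmt.Println(err)
	}
}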
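
The parser also composes with the new TeamRepository.FindAllActiveInTimeRange, which treats a nil bound as open-ended. A library-style sketch (no DB wiring; the package name is arbitrary):

package sketch

import (
	"github.com/devtron-labs/common-lib/utils"
	"github.com/devtron-labs/devtron/pkg/team/repository"
)

// teamsCreatedIn returns active teams created inside the resolved window.
func teamsCreatedIn(teamRepo repository.TeamRepository, window utils.TimeWindows) ([]repository.Team, error) {
	resolved, err := utils.NewTimeWindowRequest(window).ParseAndValidateTimeRange()
	if err != nil {
		return nil, err
	}
	// nil From/To would mean "no lower/upper bound" for this repository method
	return teamRepo.FindAllActiveInTimeRange(resolved.From, resolved.To)
}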
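
The new ScanStatus field on ImageScanFilter is additive: the zero value (ScanStatusAll) preserves today's behaviour and is omitted from JSON. Illustrative construction with placeholder values:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean"
)

func main() {
	filter := bean.ImageScanFilter{
		AppName:        "payments", // placeholder values
		EnvironmentIds: []int{3, 7},
		ScanStatus:     bean.ScanStatusNotScanned, // "" = all | "scanned" | "not-scanned"
	}
	payload, _ := json.Marshal(filter)
	fmt.Println(string(payload)) // scanStatus is dropped when empty (omitempty)
}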
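
Finally, the new generic workerPool in common-lib gathers per-task results and stops scheduling further work once a task errors. A sketch; the logger and the service-name conversion are placeholders (this assumes constants.ServiceName is a string-backed type, as its usage in workerpool.go suggests):

package main

import (
	"fmt"

	"github.com/devtron-labs/common-lib/constants"
	"github.com/devtron-labs/common-lib/workerPool"
	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample().Sugar()               // placeholder logger
	service := constants.ServiceName("ORCHESTRATOR") // placeholder; assumes string-backed type

	pool := workerPool.NewWorkerPool[int](4, service, logger).InitializeResponse()
	for i := 1; i <= 8; i++ {
		i := i // capture loop variable (pre-Go 1.22 semantics)
		pool.Submit(func() (int, error) {
			return i * i, nil // zero values are dropped unless IncludeZeroValue() is chained
		})
	}
	if err := pool.StopWait(); err != nil {
		logger.Errorw("a task failed", "err", err)
		return
	}
	fmt.Println(pool.GetResponse()) // result order is not guaranteed
}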