handle older crds (#76)
* update elastiService definition

* handle older elastiServices and improve error logging

* omit nil check as len() handles it
Maanas-23 authored Feb 7, 2025
1 parent 5ce669f commit c9479aa
Showing 4 changed files with 59 additions and 9 deletions.
27 changes: 27 additions & 0 deletions charts/elasti/templates/elastiservice-crd.yaml
@@ -40,6 +40,19 @@ spec:
spec:
description: ElastiServiceSpec defines the desired state of ElastiService
properties:
autoscaler:
properties:
name:
type: string
type:
type: string
required:
- name
- type
type: object
cooldownPeriod:
format: int32
type: integer
minTargetReplicas:
format: int32
type: integer
@@ -57,6 +70,17 @@ spec:
type: object
service:
type: string
triggers:
items:
properties:
metadata:
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- type
type: object
type: array
type: object
status:
description: ElastiServiceStatus defines the observed state of ElastiService
@@ -67,6 +91,9 @@ spec:
Important: Run "make" to regenerate code after modifying this file
format: date-time
type: string
lastScaledUpTime:
format: date-time
type: string
mode:
type: string
type: object
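
Taken together, the hunks above add autoscaler, cooldownPeriod and triggers to the ElastiService spec and lastScaledUpTime to its status. A minimal sketch of a manifest exercising the new fields follows; only the field names come from this CRD, while the apiVersion group, the autoscaler and trigger type values, and the trigger metadata keys are illustrative assumptions.

# Hypothetical manifest; field names taken from the CRD hunks above,
# everything else (group, type values, metadata keys) is assumed.
apiVersion: elasti.truefoundry.com/v1alpha1   # assumed group
kind: ElastiService
metadata:
  name: demo
spec:
  service: demo-svc
  minTargetReplicas: 1
  cooldownPeriod: 300          # seconds; zero or unset falls back to a default (see values.go below)
  autoscaler:
    name: demo-hpa             # required
    type: hpa                  # required; value is illustrative
  triggers:
    - type: prometheus         # required; value is illustrative
      metadata:                # free-form, x-kubernetes-preserve-unknown-fields
        query: sum(rate(http_requests_total{service="demo-svc"}[1m]))
        threshold: "0.5"
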
23 changes: 17 additions & 6 deletions pkg/scaling/scale_handler.go
@@ -124,15 +124,19 @@ func (h *ScaleHandler) handleScaleToZero(ctx context.Context, es *v1alpha1.Elast
Namespace: es.Namespace,
}
shouldScale := true
if len(es.Spec.Triggers) == 0 {
h.logger.Info("No triggers found, skipping scale to zero", zap.String("namespacedName", namespacedName.String()))
return nil
}
for _, trigger := range es.Spec.Triggers {
scaler, err := h.createScalerForTrigger(&trigger)
if err != nil {
return fmt.Errorf("failed to create scaler for trigger: %w", err)
return fmt.Errorf("failed to create scaler for %s: %w", namespacedName.String(), err)
}

scalerResult, err := scaler.ShouldScaleToZero(ctx)
if err != nil {
return fmt.Errorf("failed to check scaler: %w", err)
return fmt.Errorf("failed to check scaler for %s: %w", namespacedName.String(), err)
}
if !scalerResult {
shouldScale = false
@@ -141,7 +145,7 @@ func (h *ScaleHandler) handleScaleToZero(ctx context.Context, es *v1alpha1.Elast

err = scaler.Close(ctx)
if err != nil {
h.logger.Error("failed to close scaler", zap.Error(err))
h.logger.Error("failed to close scaler", zap.String("namespacedName", namespacedName.String()), zap.Error(err))
}
}
if !shouldScale {
@@ -159,6 +163,9 @@ func (h *ScaleHandler) handleScaleToZero(ctx context.Context, es *v1alpha1.Elast
// Check cooldown period
if es.Status.LastScaledUpTime != nil {
cooldownPeriod := time.Second * time.Duration(es.Spec.CooldownPeriod)
if cooldownPeriod == 0 {
cooldownPeriod = values.DefaultCooldownPeriod
}

if time.Since(es.Status.LastScaledUpTime.Time) < cooldownPeriod {
h.logger.Info("Skipping scale down as minimum cooldownPeriod not met",
@@ -181,17 +188,21 @@ func (h *ScaleHandler) handleScaleFromZero(ctx context.Context, es *v1alpha1.Ela
Namespace: es.Namespace,
}
shouldScale := false
if len(es.Spec.Triggers) == 0 {
h.logger.Info("No triggers found, skipping scale from zero", zap.String("namespacedName", namespacedName.String()))
return nil
}
for _, trigger := range es.Spec.Triggers {
scaler, err := h.createScalerForTrigger(&trigger)
if err != nil {
h.logger.Error("failed to create scaler for trigger", zap.Error(err))
h.logger.Error("failed to create scaler for trigger", zap.String("namespacedName", namespacedName.String()), zap.Error(err))
shouldScale = true
break
}

scalerResult, err := scaler.ShouldScaleFromZero(ctx)
if err != nil {
h.logger.Error("failed to check scaler", zap.Error(err))
h.logger.Error("failed to check scaler", zap.String("namespacedName", namespacedName.String()), zap.Error(err))
shouldScale = true
break
}
@@ -202,7 +213,7 @@ func (h *ScaleHandler) handleScaleFromZero(ctx context.Context, es *v1alpha1.Ela

err = scaler.Close(ctx)
if err != nil {
h.logger.Error("failed to close scaler", zap.Error(err))
h.logger.Error("failed to close scaler", zap.String("namespacedName", namespacedName.String()), zap.Error(err))
}
}
if !shouldScale {
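
The scale_handler.go changes above come down to two backward-compatibility guards plus richer log context: an ElastiService with no triggers (typical of objects created from the older CRD) is skipped outright, and a zero cooldownPeriod falls back to values.DefaultCooldownPeriod before the cooldown window is checked. A minimal Go sketch of just those guards follows — simplified stand-in types, no scaler evaluation, not the actual handler:

package sketch

import "time"

// Stand-in for the handful of v1alpha1.ElastiService fields the guards use; illustrative only.
type elastiService struct {
    Triggers         []struct{ Type string }
    CooldownPeriod   int32 // seconds; zero when unset, e.g. on objects created before the field existed
    LastScaledUpTime *time.Time
}

const defaultCooldownPeriod = 300 * time.Second // mirrors values.DefaultCooldownPeriod

// shouldSkipScaleToZero reproduces only the guards added in handleScaleToZero:
// no triggers means "leave the service alone", and an unset cooldownPeriod is
// replaced by the default before the cooldown window is evaluated.
func shouldSkipScaleToZero(es elastiService, now time.Time) bool {
    if len(es.Triggers) == 0 {
        return true
    }
    cooldown := time.Duration(es.CooldownPeriod) * time.Second
    if cooldown == 0 {
        cooldown = defaultCooldownPeriod
    }
    return es.LastScaledUpTime != nil && now.Sub(*es.LastScaledUpTime) < cooldown
}

The real handler additionally evaluates every trigger's scaler and, as the hunks show, now tags each error and log line with the object's namespacedName.
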
10 changes: 8 additions & 2 deletions pkg/scaling/scalers/prometheus_scaler.go
@@ -145,9 +145,12 @@ func (s *prometheusScaler) executePromQuery(ctx context.Context) (float64, error
func (s *prometheusScaler) ShouldScaleToZero(ctx context.Context) (bool, error) {
metricValue, err := s.executePromQuery(ctx)
if err != nil {
return false, fmt.Errorf("failed to execute prometheus query: %w", err)
return false, fmt.Errorf("failed to execute prometheus query %s: %w", s.metadata.Query, err)
}

if metricValue == -1 {
return false, nil
}
if metricValue < s.metadata.Threshold {
return true, nil
}
@@ -157,7 +160,10 @@ func (s *prometheusScaler) ShouldScaleToZero(ctx context.Context) (bool, error)
func (s *prometheusScaler) ShouldScaleFromZero(ctx context.Context) (bool, error) {
metricValue, err := s.executePromQuery(ctx)
if err != nil {
return false, fmt.Errorf("failed to execute prometheus query: %w", err)
return true, fmt.Errorf("failed to execute prometheus query %s: %w", s.metadata.Query, err)
}
if metricValue == -1 {
return true, nil
}

if metricValue >= s.metadata.Threshold {
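
The prometheus_scaler.go hunks above bias the scaler toward keeping workloads available: the failing query text is now included in errors, and a metric value of -1 — presumably the "no data" sentinel returned by executePromQuery — blocks scale-to-zero while permitting scale-from-zero, the same direction taken when the query itself fails. A condensed Go sketch of that decision, with error propagation omitted:

package sketch

// scaleDecision condenses the branching added to ShouldScaleToZero and
// ShouldScaleFromZero above. Assumption: a metricValue of -1 is the scaler's
// "no data" sentinel. Both the sentinel and a query error fail open, i.e.
// they block scale-to-zero but allow scale-from-zero.
func scaleDecision(metricValue, threshold float64, queryErr error) (toZero, fromZero bool) {
    if queryErr != nil || metricValue == -1 {
        return false, true
    }
    return metricValue < threshold, metricValue >= threshold
}
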
8 changes: 7 additions & 1 deletion pkg/values/values.go
@@ -1,6 +1,10 @@
package values

import "k8s.io/apimachinery/pkg/runtime/schema"
import (
"time"

"k8s.io/apimachinery/pkg/runtime/schema"
)

const (
ArgoPhaseHealthy = "Healthy"
@@ -15,6 +19,8 @@ const (
NullMode = ""

Success = "success"

DefaultCooldownPeriod = time.Second * 300
)

var (
