@ -13,12 +13,14 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// Human-readable messages attached to the LokiStack status conditions.
	messageReady              = "All components ready"
	messageFailed             = "Some LokiStack components failed"
	messagePending            = "Some LokiStack components pending on dependencies"
	messageDegradedNodeLabels = "Cluster contains no nodes matching the labels used for zone-awareness"
)
var (
@ -37,6 +39,11 @@ var (
Message : messageReady ,
Reason : string ( lokiv1 . ReasonReadyComponents ) ,
}
conditionDegradedNodeLabels = metav1 . Condition {
Type : string ( lokiv1 . ConditionDegraded ) ,
Message : messageDegradedNodeLabels ,
Reason : string ( lokiv1 . ReasonNoZoneAwareNodes ) ,
}
)
// DegradedError contains information about why the managed LokiStack has an invalid configuration.
@ -61,7 +68,7 @@ func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, m
return updateCondition ( ctx , k , req , degraded )
}
func generateCondition ( cs * lokiv1 . LokiStackComponentStatus ) metav1 . Condition {
func generateCondition ( ctx context . Context , c s * lokiv1 . LokiStackComponentStatus , k client . Client , req ctrl . Request , stack * lokiv1 . LokiStack ) ( metav1 . Condition , error ) {
// Check for failed pods first
failed := len ( cs . Compactor [ corev1 . PodFailed ] ) +
len ( cs . Distributor [ corev1 . PodFailed ] ) +
@ -73,7 +80,7 @@ func generateCondition(cs *lokiv1.LokiStackComponentStatus) metav1.Condition {
len ( cs . Ruler [ corev1 . PodFailed ] )
if failed != 0 {
return conditionFailed
return conditionFailed , nil
}
// Check for pending pods
@ -87,10 +94,37 @@ func generateCondition(cs *lokiv1.LokiStackComponentStatus) metav1.Condition {
len ( cs . Ruler [ corev1 . PodPending ] )
if pending != 0 {
return conditionPending
if stack . Spec . Replication != nil && len ( stack . Spec . Replication . Zones ) > 0 {
// When there are pending pods and zone-awareness is enabled check if there are any nodes
// that can satisfy the constraints and emit a condition if not.
nodesOk , err := checkForZoneawareNodes ( ctx , k , stack . Spec . Replication . Zones )
if err != nil {
return metav1 . Condition { } , err
}
if ! nodesOk {
return conditionDegradedNodeLabels , nil
}
}
return conditionPending , nil
}
return conditionReady , nil
}
// checkForZoneawareNodes returns true if the cluster contains at least one
// node that carries every topology key referenced by the zone-awareness
// replication configuration. It lists nodes filtered by the presence of all
// topology-key labels; an empty result means no node can satisfy the
// zone-spread constraints.
//
// Returns an error only if the node list call against the API server fails.
func checkForZoneawareNodes(ctx context.Context, k client.Client, zones []lokiv1.ZoneSpec) (bool, error) {
	// HasLabels matches nodes that have every listed label key, regardless
	// of the label's value.
	nodeLabels := client.HasLabels{}
	for _, z := range zones {
		nodeLabels = append(nodeLabels, z.TopologyKey)
	}

	nodeList := &corev1.NodeList{}
	if err := k.List(ctx, nodeList, nodeLabels); err != nil {
		return false, err
	}

	return len(nodeList.Items) > 0, nil
}
func updateCondition ( ctx context . Context , k k8s . Client , req ctrl . Request , condition metav1 . Condition ) error {