diff --git a/pkg/backends/backends.go b/pkg/backends/backends.go index 5f0a9b4a2b..5375515f14 100644 --- a/pkg/backends/backends.go +++ b/pkg/backends/backends.go @@ -71,6 +71,18 @@ func NewPoolWithConnectionTrackingPolicy(cloud *gce.Cloud, namer namer.BackendNa } } +// L4BackendServiceParams encapsulates parameters for ensuring an L4 BackendService. +type L4BackendServiceParams struct { + Name string + HealthCheckLink string + Protocol string + SessionAffinity string + Scheme string + NamespacedName types.NamespacedName + NetworkInfo *network.NetworkInfo + ConnectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy +} + // ensureDescription updates the BackendService Description with the expected value func ensureDescription(be *composite.BackendService, sp *utils.ServicePort) (needsUpdate bool) { desc := sp.GetDescription() @@ -306,16 +318,16 @@ func (b *Backends) DeleteSignedUrlKey(be *composite.BackendService, keyName stri } // EnsureL4BackendService creates or updates the backend service with the given name. -// TODO(code-elinka): refactor the list of arguments (there are too many now) -func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinity, scheme string, namespacedName types.NamespacedName, network network.NetworkInfo, connectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy, beLogger klog.Logger) (*composite.BackendService, error) { +func (b *Backends) EnsureL4BackendService(params L4BackendServiceParams, beLogger klog.Logger) (*composite.BackendService, error) { start := time.Now() - beLogger.V(2).Info("EnsureL4BackendService started", "serviceKey", namespacedName, "scheme", scheme, "protocol", protocol, "sessionAffinity", sessionAffinity, "network", network.NetworkURL, "subnetwork", network.SubnetworkURL) + beLogger = beLogger.WithValues("serviceKey", params.NamespacedName, "scheme", params.Scheme, "protocol", params.Protocol, "sessionAffinity", params.SessionAffinity, "network", params.NetworkInfo) + beLogger.V(2).Info("EnsureL4BackendService started") defer func() { - beLogger.V(2).Info("EnsureL4BackendService finished", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName, "timeTaken", time.Since(start)) + beLogger.V(2).Info("EnsureL4BackendService finished", "timeTaken", time.Since(start)) }() - beLogger.V(2).Info("EnsureL4BackendService: checking existing backend service", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName) - key, err := composite.CreateKey(b.cloud, name, meta.Regional) + beLogger.V(2).Info("EnsureL4BackendService: checking existing backend service") + key, err := composite.CreateKey(b.cloud, params.Name, meta.Regional) if err != nil { return nil, err } @@ -323,28 +335,28 @@ func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinit if err != nil && !utils.IsNotFoundError(err) { return nil, err } - desc, err := utils.MakeL4LBServiceDescription(namespacedName.String(), "", meta.VersionGA, false, utils.ILB) + desc, err := utils.MakeL4LBServiceDescription(params.NamespacedName.String(), "", meta.VersionGA, false, utils.ILB) if err != nil { beLogger.Info("EnsureL4BackendService: Failed to generate description for BackendService", "err", err) } expectedBS := &composite.BackendService{ - Name: name, - Protocol: protocol, + Name: params.Name, + Protocol: params.Protocol, Description: desc, - HealthChecks: []string{hcLink}, - SessionAffinity: utils.TranslateAffinityType(sessionAffinity, beLogger), - LoadBalancingScheme: scheme, + HealthChecks: 
[]string{params.HealthCheckLink}, + SessionAffinity: utils.TranslateAffinityType(params.SessionAffinity, beLogger), + LoadBalancingScheme: params.Scheme, } // We need this configuration only for Strong Session Affinity feature if b.useConnectionTrackingPolicy { - beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using connection tracking policy: %+v", connectionTrackingPolicy), "serviceKey", namespacedName) - expectedBS.ConnectionTrackingPolicy = connectionTrackingPolicy + beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using connection tracking policy: %+v", params.ConnectionTrackingPolicy), "serviceKey", params.NamespacedName) + expectedBS.ConnectionTrackingPolicy = params.ConnectionTrackingPolicy } - if !network.IsDefault { - beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using non-default network: %+v", network), "serviceKey", namespacedName) - expectedBS.Network = network.NetworkURL + if params.NetworkInfo != nil && !params.NetworkInfo.IsDefault { + beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using non-default network: %+v", params.NetworkInfo)) + expectedBS.Network = params.NetworkInfo.NetworkURL } - if protocol == string(api_v1.ProtocolTCP) { + if params.Protocol == string(api_v1.ProtocolTCP) { expectedBS.ConnectionDraining = &composite.ConnectionDraining{DrainingTimeoutSec: DefaultConnectionDrainingTimeoutSeconds} } else { // This config is not supported in UDP mode, explicitly set to 0 to reset, if proto was TCP previously. @@ -353,12 +365,12 @@ func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinit // Create backend service if none was found if bs == nil { - beLogger.V(2).Info("EnsureL4BackendService: creating backend service", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName) + beLogger.V(2).Info("EnsureL4BackendService: creating backend service") err := composite.CreateBackendService(b.cloud, key, expectedBS, beLogger) if err != nil { return nil, err } - beLogger.V(2).Info("EnsureL4BackendService: created backend service successfully", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName) + beLogger.V(2).Info("EnsureL4BackendService: created backend service successfully") // We need to perform a GCE call to re-fetch the object we just created // so that the "Fingerprint" field is filled in. This is needed to update the // object without error. The lookup is also needed to populate the selfLink. 
@@ -366,14 +378,14 @@ func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinit } if backendSvcEqual(expectedBS, bs, b.useConnectionTrackingPolicy) { - beLogger.V(2).Info("EnsureL4BackendService: backend service did not change, skipping update", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName) + beLogger.V(2).Info("EnsureL4BackendService: backend service did not change, skipping update") return bs, nil } - if bs.ConnectionDraining != nil && bs.ConnectionDraining.DrainingTimeoutSec > 0 && protocol == string(api_v1.ProtocolTCP) { + if bs.ConnectionDraining != nil && bs.ConnectionDraining.DrainingTimeoutSec > 0 && params.Protocol == string(api_v1.ProtocolTCP) { // only preserves user overridden timeout value when the protocol is TCP expectedBS.ConnectionDraining.DrainingTimeoutSec = bs.ConnectionDraining.DrainingTimeoutSec } - beLogger.V(2).Info("EnsureL4BackendService: updating backend service", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName) + beLogger.V(2).Info("EnsureL4BackendService: updating backend service") // Set fingerprint for optimistic locking expectedBS.Fingerprint = bs.Fingerprint // Copy backends to avoid detaching them during update. This could be replaced with a patch call in the future. @@ -381,7 +393,7 @@ func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinit if err := composite.UpdateBackendService(b.cloud, key, expectedBS, beLogger); err != nil { return nil, err } - beLogger.V(2).Info("EnsureL4BackendService: updated backend service successfully", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName) + beLogger.V(2).Info("EnsureL4BackendService: updated backend service successfully") return composite.GetBackendService(b.cloud, key, meta.VersionGA, beLogger) } diff --git a/pkg/backends/backends_test.go b/pkg/backends/backends_test.go index 371a84adc1..881177c41e 100644 --- a/pkg/backends/backends_test.go +++ b/pkg/backends/backends_test.go @@ -103,8 +103,18 @@ func TestEnsureL4BackendService(t *testing.T) { hcLink := l4namer.L4HealthCheck(tc.serviceNamespace, tc.serviceName, false) bsName := l4namer.L4Backend(tc.serviceNamespace, tc.serviceName) - network := network.NetworkInfo{IsDefault: false, NetworkURL: "https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"} - bs, err := backendPool.EnsureL4BackendService(bsName, hcLink, tc.protocol, tc.affinityType, tc.schemeType, namespacedName, network, tc.connectionTrackingPolicy, klog.TODO()) + network := &network.NetworkInfo{IsDefault: false, NetworkURL: "https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"} + backendParams := L4BackendServiceParams{ + Name: bsName, + HealthCheckLink: hcLink, + Protocol: tc.protocol, + SessionAffinity: tc.affinityType, + Scheme: tc.schemeType, + NamespacedName: namespacedName, + NetworkInfo: network, + ConnectionTrackingPolicy: tc.connectionTrackingPolicy, + } + bs, err := backendPool.EnsureL4BackendService(backendParams, klog.TODO()) if err != nil { t.Errorf("EnsureL4BackendService failed") } @@ -162,7 +172,7 @@ func TestEnsureL4BackendServiceDoesNotDetachBackends(t *testing.T) { hcLink := l4namer.L4HealthCheck(serviceNamespace, serviceName, false) bsName := l4namer.L4Backend(serviceNamespace, serviceName) - network := network.NetworkInfo{IsDefault: false, NetworkURL: "https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"} + network := &network.NetworkInfo{IsDefault: false, NetworkURL: 
"https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"} backendName := "testNeg" existingBS := &composite.BackendService{ @@ -189,7 +199,17 @@ func TestEnsureL4BackendServiceDoesNotDetachBackends(t *testing.T) { } var noConnectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy = nil - bs, err := backendPool.EnsureL4BackendService(bsName, hcLink, "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), namespacedName, network, noConnectionTrackingPolicy, klog.TODO()) + backendParams := L4BackendServiceParams{ + Name: bsName, + HealthCheckLink: hcLink, + Protocol: "TCP", + SessionAffinity: string(v1.ServiceAffinityNone), + Scheme: string(cloud.SchemeInternal), + NamespacedName: namespacedName, + NetworkInfo: network, + ConnectionTrackingPolicy: noConnectionTrackingPolicy, + } + bs, err := backendPool.EnsureL4BackendService(backendParams, klog.TODO()) if err != nil { t.Errorf("EnsureL4BackendService failed") } diff --git a/pkg/backends/ig_linker_test.go b/pkg/backends/ig_linker_test.go index 71be844f37..13c81ac4b5 100644 --- a/pkg/backends/ig_linker_test.go +++ b/pkg/backends/ig_linker_test.go @@ -38,7 +38,10 @@ import ( "k8s.io/klog/v2" ) -const defaultZone = "zone-a" +const ( + defaultTestZone = "zone-a" + defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" +) func newTestIGLinker(fakeGCE *gce.Cloud, fakeInstancePool instancegroups.Manager) *instanceGroupLinker { fakeBackendPool := NewPool(fakeGCE, defaultNamer) @@ -56,8 +59,8 @@ func TestLink(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) nodeInformer := zonegetter.FakeNodeInformer() - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) - zonegetter.AddFakeNodes(fakeZoneGetter, defaultZone, "test-instance") + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + zonegetter.AddFakeNodes(fakeZoneGetter, defaultTestZone, "test-instance") fakeNodePool := instancegroups.NewManager(&instancegroups.ManagerConfig{ Cloud: fakeIGs, @@ -79,7 +82,7 @@ func TestLink(t *testing.T) { // Mimic the syncer creating the backend. linker.backendPool.Create(sp, "fake-health-check-link", klog.TODO()) - if err := linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil { + if err := linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil { t.Fatalf("%v", err) } @@ -98,8 +101,8 @@ func TestLinkWithCreationModeError(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) nodeInformer := zonegetter.FakeNodeInformer() - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) - zonegetter.AddFakeNodes(fakeZoneGetter, defaultZone, "test-instance") + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + zonegetter.AddFakeNodes(fakeZoneGetter, defaultTestZone, "test-instance") fakeNodePool := instancegroups.NewManager(&instancegroups.ManagerConfig{ Cloud: fakeIGs, @@ -135,7 +138,7 @@ func TestLinkWithCreationModeError(t *testing.T) { // Mimic the syncer creating the backend. 
linker.backendPool.Create(sp, "fake-health-check-link", klog.TODO()) - if err := linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil { + if err := linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil { t.Fatalf("%v", err) } diff --git a/pkg/backends/integration_test.go b/pkg/backends/integration_test.go index 9794cd7602..60e0998614 100644 --- a/pkg/backends/integration_test.go +++ b/pkg/backends/integration_test.go @@ -54,8 +54,8 @@ func newTestJig(fakeGCE *gce.Cloud) *Jig { fakeIGs := instancegroups.NewEmptyFakeInstanceGroups() nodeInformer := zonegetter.FakeNodeInformer() - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) - zonegetter.AddFakeNodes(fakeZoneGetter, defaultZone, "test-instance") + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + zonegetter.AddFakeNodes(fakeZoneGetter, defaultTestZone, "test-instance") fakeInstancePool := instancegroups.NewManager(&instancegroups.ManagerConfig{ Cloud: fakeIGs, @@ -95,7 +95,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil { t.Fatalf("Did not expect error when syncing backend with port %v", sp.NodePort) } - if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil { + if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil { t.Fatalf("Did not expect error when linking backend with port %v to groups", sp.NodePort) } @@ -106,8 +106,8 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { // Simulate another controller updating the same backend service with // a different instance group newGroups := []*compute.Backend{ - {Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultZone, "k8s-ig-bar")}, - {Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultZone, "k8s-ig-foo")}, + {Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultTestZone, "k8s-ig-bar")}, + {Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultTestZone, "k8s-ig-foo")}, } be.Backends = append(be.Backends, newGroups...) 
if err = fakeGCE.UpdateGlobalBackendService(be); err != nil { @@ -123,7 +123,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil { t.Fatalf("Did not expect error when syncing backend with port %v", sp.NodePort) } - if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil { + if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil { t.Fatalf("Did not expect error when linking backend with port %v to groups", sp.NodePort) } @@ -141,7 +141,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { } // seed expectedGroups with the first group native to this controller - expectedGroups := sets.NewString(fmt.Sprintf("zones/%s/instanceGroups/%s", defaultZone, "k8s-ig--uid1")) + expectedGroups := sets.NewString(fmt.Sprintf("zones/%s/instanceGroups/%s", defaultTestZone, "k8s-ig--uid1")) for _, newGroup := range newGroups { igPath, err := utils.ResourcePath(newGroup.Group) if err != nil { @@ -168,7 +168,7 @@ func TestSyncChaosMonkey(t *testing.T) { if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil { t.Fatalf("Did not expect error when syncing backend with port %v, err: %v", sp.NodePort, err) } - if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil { + if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil { t.Fatalf("Did not expect error when linking backend with port %v to groups, err: %v", sp.NodePort, err) } @@ -201,7 +201,7 @@ func TestSyncChaosMonkey(t *testing.T) { if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil { t.Fatalf("Did not expect error when syncing backend with port %v", sp.NodePort) } - if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil { + if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil { t.Fatalf("Did not expect error when linking backend with port %v to groups", sp.NodePort) } if createCalls > 0 { @@ -212,7 +212,7 @@ func TestSyncChaosMonkey(t *testing.T) { if err != nil { t.Fatalf("Failed to find a backend with name %v: %v", beName, err) } - gotGroup, err := jig.fakeInstancePool.Get(defaultNamer.InstanceGroup(), defaultZone) + gotGroup, err := jig.fakeInstancePool.Get(defaultNamer.InstanceGroup(), defaultTestZone) if err != nil { t.Fatalf("Failed to find instance group %v", defaultNamer.InstanceGroup()) } diff --git a/pkg/backends/regional_ig_linker_test.go b/pkg/backends/regional_ig_linker_test.go index 7c42a25231..942aa7d72c 100644 --- a/pkg/backends/regional_ig_linker_test.go +++ b/pkg/backends/regional_ig_linker_test.go @@ -23,7 +23,6 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/cloud-provider-gcp/providers/gce" - "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/instancegroups" "k8s.io/ingress-gce/pkg/network" "k8s.io/ingress-gce/pkg/test" @@ -54,7 +53,7 @@ func newTestRegionalIgLinker(fakeGCE *gce.Cloud, backendPool *Backends, l4Namer fakeIGs := instancegroups.NewEmptyFakeInstanceGroups() nodeInformer := zonegetter.FakeNodeInformer() - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) zonegetter.AddFakeNodes(fakeZoneGetter, usCentral1AZone, "test-instance1") zonegetter.AddFakeNodes(fakeZoneGetter, "us-central1-c", "test-instance2") @@ -236,13 +235,17 @@ func TestRegionalUpdateLinkWithRemovedBackends(t *testing.T) { func createBackendService(t *testing.T, 
sp utils.ServicePort, backendPool *Backends) { t.Helper() - namespacedName := types.NamespacedName{Name: "service.Name", Namespace: "service.Namespace"} - protocol := string(apiv1.ProtocolTCP) - serviceAffinityNone := string(apiv1.ServiceAffinityNone) - schemeExternal := string(cloud.SchemeExternal) - defaultNetworkInfo := network.NetworkInfo{IsDefault: true} - var noConnectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy = nil - if _, err := backendPool.EnsureL4BackendService(sp.BackendName(), hcLink, protocol, serviceAffinityNone, schemeExternal, namespacedName, defaultNetworkInfo, noConnectionTrackingPolicy, klog.TODO()); err != nil { + backendParams := L4BackendServiceParams{ + Name: sp.BackendName(), + HealthCheckLink: hcLink, + Protocol: string(apiv1.ProtocolTCP), + SessionAffinity: string(apiv1.ServiceAffinityNone), + Scheme: string(cloud.SchemeExternal), + NamespacedName: types.NamespacedName{Name: "service.Name", Namespace: "service.Namespace"}, + NetworkInfo: &network.NetworkInfo{IsDefault: true}, + ConnectionTrackingPolicy: nil, + } + if _, err := backendPool.EnsureL4BackendService(backendParams, klog.TODO()); err != nil { t.Fatalf("Error creating backend service %v", err) } } diff --git a/pkg/context/context.go b/pkg/context/context.go index 3ac538f24b..2c1eb5eb5e 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" firewallclient "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/clientset/versioned" - informerfirewall "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1" + informerfirewall "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1" networkclient "k8s.io/cloud-provider-gcp/crd/client/network/clientset/versioned" informernetwork "k8s.io/cloud-provider-gcp/crd/client/network/informers/externalversions/network/v1" "k8s.io/cloud-provider-gcp/providers/gce" @@ -238,7 +238,8 @@ func NewControllerContext( context.EnableIngressRegionalExternal, logger, ) - context.ZoneGetter = zonegetter.NewZoneGetter(context.NodeInformer) + // The subnet specified in gce.conf is considered as the default subnet. 
+ context.ZoneGetter = zonegetter.NewZoneGetter(context.NodeInformer, context.Cloud.SubnetworkURL()) context.InstancePool = instancegroups.NewManager(&instancegroups.ManagerConfig{ Cloud: context.Cloud, Namer: context.ClusterNamer, diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index ce51daeca6..bf83de1f67 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -55,6 +55,8 @@ import ( "k8s.io/ingress-gce/pkg/utils/zonegetter" ) +const defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" + var ( nodePortCounter = 30000 clusterUID = "aaaaa" @@ -67,7 +69,7 @@ func newLoadBalancerController() *LoadBalancerController { backendConfigClient := backendconfigclient.NewSimpleClientset() fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) nodeInformer := zonegetter.FakeNodeInformer() - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) zonegetter.AddFakeNodes(fakeZoneGetter, fakeZone, "test-node") (fakeGCE.Compute().(*cloud.MockGCE)).MockGlobalForwardingRules.InsertHook = loadbalancers.InsertGlobalForwardingRuleHook diff --git a/pkg/instancegroups/controller_test.go b/pkg/instancegroups/controller_test.go index 53eaa407d7..311cc67de3 100644 --- a/pkg/instancegroups/controller_test.go +++ b/pkg/instancegroups/controller_test.go @@ -97,7 +97,7 @@ func TestSync(t *testing.T) { config.HasSynced = func() bool { return true } - config.ZoneGetter = zonegetter.NewZoneGetter(informer) + config.ZoneGetter = zonegetter.NewFakeZoneGetter(informer, defaultTestSubnetURL, false) controller := NewController(config, logr.Logger{}) diff --git a/pkg/instancegroups/manager_test.go b/pkg/instancegroups/manager_test.go index fae9f0dd62..66b3172f61 100644 --- a/pkg/instancegroups/manager_test.go +++ b/pkg/instancegroups/manager_test.go @@ -18,10 +18,11 @@ package instancegroups import ( "fmt" - "k8s.io/klog/v2" "strings" "testing" + "k8s.io/klog/v2" + "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-gce/pkg/test" @@ -30,15 +31,17 @@ import ( ) const ( - defaultZone = "default-zone" - basePath = "/basepath/projects/project-id/" + defaultTestZone = "default-zone" + basePath = "/basepath/projects/project-id/" + + defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" ) var defaultNamer = namer.NewNamer("uid1", "fw1", klog.TODO()) func newNodePool(f *FakeInstanceGroups, zone string, maxIGSize int) Manager { nodeInformer := zonegetter.FakeNodeInformer() - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) pool := NewManager(&ManagerConfig{ Cloud: f, @@ -91,16 +94,16 @@ func TestNodePoolSync(t *testing.T) { // create fake gce node pool with existing gceNodes ig := &compute.InstanceGroup{Name: defaultNamer.InstanceGroup()} zonesToIGs := map[string]IGsToInstances{ - defaultZone: { + defaultTestZone: { ig: testCase.gceNodes, }, } fakeGCEInstanceGroups := NewFakeInstanceGroups(zonesToIGs, maxIGSize) - pool := newNodePool(fakeGCEInstanceGroups, defaultZone, maxIGSize) + pool := newNodePool(fakeGCEInstanceGroups, defaultTestZone, maxIGSize) for _, kubeNode := range testCase.kubeNodes.List() { manager := pool.(*manager) - zonegetter.AddFakeNodes(manager.ZoneGetter, defaultZone, kubeNode) + 
zonegetter.AddFakeNodes(manager.ZoneGetter, defaultTestZone, kubeNode) } igName := defaultNamer.InstanceGroup() @@ -123,9 +126,9 @@ func TestNodePoolSync(t *testing.T) { t.Errorf("Should skip sync. apiCallsCountBeforeSync = %d, apiCallsCountAfterSync = %d", apiCallsCountBeforeSync, apiCallsCountAfterSync) } - instancesList, err := fakeGCEInstanceGroups.ListInstancesInInstanceGroup(ig.Name, defaultZone, allInstances) + instancesList, err := fakeGCEInstanceGroups.ListInstancesInInstanceGroup(ig.Name, defaultTestZone, allInstances) if err != nil { - t.Fatalf("fakeGCEInstanceGroups.ListInstancesInInstanceGroup(%s, %s, %s) returned error %v, want nil", ig.Name, defaultZone, allInstances, err) + t.Fatalf("fakeGCEInstanceGroups.ListInstancesInInstanceGroup(%s, %s, %s) returned error %v, want nil", ig.Name, defaultTestZone, allInstances, err) } instances, err := test.InstancesListToNameSet(instancesList) if err != nil { @@ -161,14 +164,14 @@ func TestNodePoolSync(t *testing.T) { func TestSetNamedPorts(t *testing.T) { maxIGSize := 1000 zonesToIGs := map[string]IGsToInstances{ - defaultZone: { + defaultTestZone: { &compute.InstanceGroup{Name: "ig"}: sets.NewString("ig"), }, } fakeIGs := NewFakeInstanceGroups(zonesToIGs, maxIGSize) - pool := newNodePool(fakeIGs, defaultZone, maxIGSize) + pool := newNodePool(fakeIGs, defaultTestZone, maxIGSize) manager := pool.(*manager) - zonegetter.AddFakeNodes(manager.ZoneGetter, defaultZone, "test-node") + zonegetter.AddFakeNodes(manager.ZoneGetter, defaultTestZone, "test-node") testCases := []struct { activePorts []int64 @@ -219,11 +222,11 @@ func TestSetNamedPorts(t *testing.T) { func TestGetInstanceReferences(t *testing.T) { maxIGSize := 1000 zonesToIGs := map[string]IGsToInstances{ - defaultZone: { + defaultTestZone: { &compute.InstanceGroup{Name: "ig"}: sets.NewString("ig"), }, } - pool := newNodePool(NewFakeInstanceGroups(zonesToIGs, maxIGSize), defaultZone, maxIGSize) + pool := newNodePool(NewFakeInstanceGroups(zonesToIGs, maxIGSize), defaultTestZone, maxIGSize) instances := pool.(*manager) nodeNames := []string{"node-1", "node-2", "node-3", "node-4.region.zone"} @@ -231,10 +234,10 @@ func TestGetInstanceReferences(t *testing.T) { expectedRefs := map[string]struct{}{} for _, nodeName := range nodeNames { name := strings.Split(nodeName, ".")[0] - expectedRefs[fmt.Sprintf("%szones/%s/instances/%s", basePath, defaultZone, name)] = struct{}{} + expectedRefs[fmt.Sprintf("%szones/%s/instances/%s", basePath, defaultTestZone, name)] = struct{}{} } - refs := instances.getInstanceReferences(defaultZone, nodeNames) + refs := instances.getInstanceReferences(defaultTestZone, nodeNames) for _, ref := range refs { if _, ok := expectedRefs[ref.Instance]; !ok { t.Errorf("found unexpected reference: %s, expected only %+v", ref.Instance, expectedRefs) diff --git a/pkg/l4lb/l4controller.go b/pkg/l4lb/l4controller.go index 52e89d8cda..5922e4b545 100644 --- a/pkg/l4lb/l4controller.go +++ b/pkg/l4lb/l4controller.go @@ -121,15 +121,16 @@ func NewILBController(ctx *context.ControllerContext, stopCh <-chan struct{}, lo addSvc := obj.(*v1.Service) svcKey := utils.ServiceKeyFunc(addSvc.Namespace, addSvc.Name) needsILB, svcType := annotations.WantsL4ILB(addSvc) + svcLogger := logger.WithValues("serviceKey", svcKey) // Check for deletion since updates or deletes show up as Add when controller restarts. 
if needsILB || l4c.needsDeletion(addSvc) { - logger.V(3).Info("ILB Service added, enqueuing", "serviceKey", svcKey) + svcLogger.V(3).Info("ILB Service added, enqueuing") l4c.ctx.Recorder(addSvc.Namespace).Eventf(addSvc, v1.EventTypeNormal, "ADD", svcKey) - l4c.serviceVersions.SetLastUpdateSeen(svcKey, addSvc.ResourceVersion, logger) + l4c.serviceVersions.SetLastUpdateSeen(svcKey, addSvc.ResourceVersion, svcLogger) l4c.svcQueue.Enqueue(addSvc) l4c.enqueueTracker.Track() } else { - logger.V(4).Info("Ignoring add for non-lb service", "serviceKey", svcKey, "serviceType", svcType) + svcLogger.V(4).Info("Ignoring add for non-lb service", "serviceType", svcType) } }, // Deletes will be handled in the Update when the deletion timestamp is set. @@ -137,11 +138,12 @@ func NewILBController(ctx *context.ControllerContext, stopCh <-chan struct{}, lo curSvc := cur.(*v1.Service) svcKey := utils.ServiceKeyFunc(curSvc.Namespace, curSvc.Name) oldSvc := old.(*v1.Service) + svcLogger := logger.WithValues("serviceKey", svcKey) needsUpdate := l4c.needsUpdate(oldSvc, curSvc) needsDeletion := l4c.needsDeletion(curSvc) if needsUpdate || needsDeletion { - logger.V(3).Info("Service changed, enqueuing", "serviceKey", svcKey, "needsUpdate", needsUpdate, "needsDeletion", needsDeletion) - l4c.serviceVersions.SetLastUpdateSeen(svcKey, curSvc.ResourceVersion, logger) + svcLogger.V(3).Info("Service changed, enqueuing", "needsUpdate", needsUpdate, "needsDeletion", needsDeletion) + l4c.serviceVersions.SetLastUpdateSeen(svcKey, curSvc.ResourceVersion, svcLogger) l4c.svcQueue.Enqueue(curSvc) l4c.enqueueTracker.Track() return @@ -151,11 +153,11 @@ func NewILBController(ctx *context.ControllerContext, stopCh <-chan struct{}, lo if needsILB && reflect.DeepEqual(old, cur) { // this will happen when informers run a resync on all the existing services even when the object is // not modified. - logger.V(3).Info("Periodic enqueueing of service", "serviceKey", svcKey) + svcLogger.V(3).Info("Periodic enqueueing of service") l4c.svcQueue.Enqueue(curSvc) l4c.enqueueTracker.Track() } else if needsILB { - l4c.serviceVersions.SetLastIgnored(svcKey, curSvc.ResourceVersion, logger) + l4c.serviceVersions.SetLastIgnored(svcKey, curSvc.ResourceVersion, svcLogger) } }, }) diff --git a/pkg/l4lb/l4lbcommon.go b/pkg/l4lb/l4lbcommon.go index 0b054118fd..dc18f75280 100644 --- a/pkg/l4lb/l4lbcommon.go +++ b/pkg/l4lb/l4lbcommon.go @@ -67,43 +67,42 @@ func mergeAnnotations(existing, lbAnnotations map[string]string, keysToRemove [] } // updateL4ResourcesAnnotations checks if new annotations should be added to service and patch service metadata if needed. 
-func updateL4ResourcesAnnotations(ctx *context.ControllerContext, svc *v1.Service, newL4LBAnnotations map[string]string, logger klog.Logger) error { - logger.V(3).Info("Updating annotations of service", "serviceKey", klog.KRef(svc.Namespace, svc.Name)) +func updateL4ResourcesAnnotations(ctx *context.ControllerContext, svc *v1.Service, newL4LBAnnotations map[string]string, svcLogger klog.Logger) error { + svcLogger.V(3).Info("Updating annotations of service") newObjectMeta := computeNewAnnotationsIfNeeded(svc, newL4LBAnnotations, loadbalancers.L4ResourceAnnotationKeys) if newObjectMeta == nil { - logger.V(3).Info("Service annotations not changed, skipping patch for service", "serviceKey", klog.KRef(svc.Namespace, svc.Name)) + svcLogger.V(3).Info("Service annotations not changed, skipping patch for service") return nil } - logger.V(3).Info("Patching annotations of service", "serviceKey", klog.KRef(svc.Namespace, svc.Name)) + svcLogger.V(3).Info("Patching annotations of service") return patch.PatchServiceObjectMetadata(ctx.KubeClient.CoreV1(), svc, *newObjectMeta) } // updateL4DualStackResourcesAnnotations checks if new annotations should be added to dual-stack service and patch service metadata if needed. -func updateL4DualStackResourcesAnnotations(ctx *context.ControllerContext, svc *v1.Service, newL4LBAnnotations map[string]string, logger klog.Logger) error { +func updateL4DualStackResourcesAnnotations(ctx *context.ControllerContext, svc *v1.Service, newL4LBAnnotations map[string]string, svcLogger klog.Logger) error { newObjectMeta := computeNewAnnotationsIfNeeded(svc, newL4LBAnnotations, loadbalancers.L4DualStackResourceAnnotationKeys) if newObjectMeta == nil { return nil } - logger.V(3).Info("Patching annotations of service", "serviceKey", klog.KRef(svc.Namespace, svc.Name)) + svcLogger.V(3).Info("Patching annotations of service") return patch.PatchServiceObjectMetadata(ctx.KubeClient.CoreV1(), svc, *newObjectMeta) } -func deleteAnnotation(ctx *context.ControllerContext, svc *v1.Service, annotationKey string, logger klog.Logger) error { +func deleteAnnotation(ctx *context.ControllerContext, svc *v1.Service, annotationKey string, svcLogger klog.Logger) error { newObjectMeta := svc.ObjectMeta.DeepCopy() if _, ok := newObjectMeta.Annotations[annotationKey]; !ok { return nil } - - logger.V(3).Info("Removing annotation from service", "annotationKey", annotationKey, "serviceKey", klog.KRef(svc.Namespace, svc.Name)) + svcLogger.V(3).Info("Removing annotation from service", "annotationKey", annotationKey) delete(newObjectMeta.Annotations, annotationKey) return patch.PatchServiceObjectMetadata(ctx.KubeClient.CoreV1(), svc, *newObjectMeta) } // updateServiceStatus this faction checks if LoadBalancer status changed and patch service if needed. 
-func updateServiceStatus(ctx *context.ControllerContext, svc *v1.Service, newStatus *v1.LoadBalancerStatus, logger klog.Logger) error { - logger.V(2).Info("Updating service status", "serviceKey", klog.KRef(svc.Namespace, svc.Name), "newStatus", fmt.Sprintf("%+v", newStatus)) +func updateServiceStatus(ctx *context.ControllerContext, svc *v1.Service, newStatus *v1.LoadBalancerStatus, svcLogger klog.Logger) error { + svcLogger.V(2).Info("Updating service status", "newStatus", fmt.Sprintf("%+v", newStatus)) if helpers.LoadBalancerStatusEqual(&svc.Status.LoadBalancer, newStatus) { - logger.V(2).Info("New and old statuses are equal, skipping patch", "serviceKey", klog.KRef(svc.Namespace, svc.Name)) + svcLogger.V(2).Info("New and old statuses are equal, skipping patch") return nil } return patch.PatchServiceLoadBalancerStatus(ctx.KubeClient.CoreV1(), svc, *newStatus) diff --git a/pkg/l4lb/l4netlbcontroller.go b/pkg/l4lb/l4netlbcontroller.go index b876833fe6..31d1445082 100644 --- a/pkg/l4lb/l4netlbcontroller.go +++ b/pkg/l4lb/l4netlbcontroller.go @@ -129,7 +129,7 @@ func NewL4NetLBController( if shouldProcess, _ := l4netLBc.shouldProcessService(addSvc, nil, svcLogger); shouldProcess { svcLogger.V(3).Info("L4 External LoadBalancer Service added, enqueuing") l4netLBc.ctx.Recorder(addSvc.Namespace).Eventf(addSvc, v1.EventTypeNormal, "ADD", svcKey) - l4netLBc.serviceVersions.SetLastUpdateSeen(svcKey, addSvc.ResourceVersion, logger) + l4netLBc.serviceVersions.SetLastUpdateSeen(svcKey, addSvc.ResourceVersion, svcLogger) l4netLBc.svcQueue.Enqueue(addSvc) l4netLBc.enqueueTracker.Track() } else { @@ -145,7 +145,7 @@ func NewL4NetLBController( if shouldProcess, isResync := l4netLBc.shouldProcessService(curSvc, oldSvc, svcLogger); shouldProcess { svcLogger.V(3).Info("L4 External LoadBalancer Service updated, enqueuing") if !isResync { - l4netLBc.serviceVersions.SetLastUpdateSeen(svcKey, curSvc.ResourceVersion, logger) + l4netLBc.serviceVersions.SetLastUpdateSeen(svcKey, curSvc.ResourceVersion, svcLogger) } l4netLBc.svcQueue.Enqueue(curSvc) l4netLBc.enqueueTracker.Track() diff --git a/pkg/l4lb/updatetype.go b/pkg/l4lb/updatetype.go index 1e93bc18b5..7a9e2d03cc 100644 --- a/pkg/l4lb/updatetype.go +++ b/pkg/l4lb/updatetype.go @@ -51,25 +51,25 @@ func (t *serviceVersionsTracker) getVersionsForSvc(svcKey string) *serviceVersio } -func (t *serviceVersionsTracker) SetLastUpdateSeen(svcKey, version string, logger klog.Logger) { +func (t *serviceVersionsTracker) SetLastUpdateSeen(svcKey, version string, svcLogger klog.Logger) { t.lock.Lock() defer t.lock.Unlock() v := t.getVersionsForSvc(svcKey) v.lastSeenUpdateVersion = version - logger.V(3).Info("serviceVersionsTracker: SetLastUpdateSeen()", "svcKey", svcKey, "version", version) + svcLogger.V(3).Info("serviceVersionsTracker: SetLastUpdateSeen()", "version", version) } -func (t *serviceVersionsTracker) SetLastIgnored(svcKey, version string, logger klog.Logger) { +func (t *serviceVersionsTracker) SetLastIgnored(svcKey, version string, svcLogger klog.Logger) { t.lock.Lock() defer t.lock.Unlock() v := t.getVersionsForSvc(svcKey) v.lastIgnoredVersion = version - logger.V(3).Info("serviceVersionsTracker: SetLastIgnored()", "svcKey", svcKey, "version", version) + svcLogger.V(3).Info("serviceVersionsTracker: SetLastIgnored()", "version", version) } -func (t *serviceVersionsTracker) SetProcessed(svcKey, version string, success, wasResync bool, logger klog.Logger) { +func (t *serviceVersionsTracker) SetProcessed(svcKey, version string, success, wasResync bool, svcLogger 
klog.Logger) { if wasResync && success { return } @@ -79,16 +79,16 @@ func (t *serviceVersionsTracker) SetProcessed(svcKey, version string, success, w v := t.getVersionsForSvc(svcKey) v.lastProcessedUpdateVersion = version v.lastProcessingSuccess = success - logger.V(3).Info("serviceVersionsTracker: SetProcessed()", "version", version, "success", success, "wasResync", wasResync) + svcLogger.V(3).Info("serviceVersionsTracker: SetProcessed()", "version", version, "success", success, "wasResync", wasResync) } -func (t *serviceVersionsTracker) IsResync(svcKey, currentVersion string, logger klog.Logger) bool { +func (t *serviceVersionsTracker) IsResync(svcKey, currentVersion string, svcLogger klog.Logger) bool { t.lock.Lock() defer t.lock.Unlock() v := t.getVersionsForSvc(svcKey) - logger.V(3).Info("serviceVersionsTracker: IsResync()", "currentVersion", currentVersion, "lastProcessedUpdateVersion", v.lastProcessedUpdateVersion, "lastProcessingSuccess", v.lastProcessingSuccess, "lastSeenUpdate", v.lastSeenUpdateVersion, "lastIgnored", v.lastIgnoredVersion) + svcLogger.V(3).Info("serviceVersionsTracker: IsResync()", "currentVersion", currentVersion, "lastProcessedUpdateVersion", v.lastProcessedUpdateVersion, "lastProcessingSuccess", v.lastProcessingSuccess, "lastSeenUpdate", v.lastSeenUpdateVersion, "lastIgnored", v.lastIgnoredVersion) if !v.lastProcessingSuccess { return false diff --git a/pkg/loadbalancers/l4.go b/pkg/loadbalancers/l4.go index 32d7fa0439..f70260bd00 100644 --- a/pkg/loadbalancers/l4.go +++ b/pkg/loadbalancers/l4.go @@ -473,10 +473,17 @@ func (l4 *L4) EnsureInternalLoadBalancer(nodeNames []string, svc *corev1.Service } // ensure backend service - // TODO(cheungdavid): Create backend logger that contains backendName, - // backendVersion, and backendScope before passing to backendPool.EnsureL4BackendService(). - // See example in backendSyncer.ensureBackendService(). 
- bs, err := l4.backendPool.EnsureL4BackendService(bsName, hcLink, string(protocol), string(l4.Service.Spec.SessionAffinity), string(cloud.SchemeInternal), l4.NamespacedName, l4.network, noConnectionTrackingPolicy, l4.svcLogger) + backendParams := backends.L4BackendServiceParams{ + Name: bsName, + HealthCheckLink: hcLink, + Protocol: string(protocol), + SessionAffinity: string(l4.Service.Spec.SessionAffinity), + Scheme: string(cloud.SchemeInternal), + NamespacedName: l4.NamespacedName, + NetworkInfo: &l4.network, + ConnectionTrackingPolicy: noConnectionTrackingPolicy, + } + bs, err := l4.backendPool.EnsureL4BackendService(backendParams, l4.svcLogger) if err != nil { result.GCEResourceInError = annotations.BackendServiceResource result.Error = err diff --git a/pkg/loadbalancers/l4_test.go b/pkg/loadbalancers/l4_test.go index 6a5501a96c..7fad42e3b0 100644 --- a/pkg/loadbalancers/l4_test.go +++ b/pkg/loadbalancers/l4_test.go @@ -86,13 +86,24 @@ func TestEnsureInternalBackendServiceUpdates(t *testing.T) { l4.healthChecks = healthchecksl4.Fake(fakeGCE, l4ilbParams.Recorder) bsName := l4.namer.L4Backend(l4.Service.Namespace, l4.Service.Name) - _, err := l4.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(svc.Spec.SessionAffinity), string(cloud.SchemeInternal), l4.NamespacedName, *network.DefaultNetwork(fakeGCE), noConnectionTrackingPolicy, klog.TODO()) + backendParams := backends.L4BackendServiceParams{ + Name: bsName, + HealthCheckLink: "", + Protocol: "TCP", + SessionAffinity: string(svc.Spec.SessionAffinity), + Scheme: string(cloud.SchemeInternal), + NamespacedName: l4.NamespacedName, + NetworkInfo: network.DefaultNetwork(fakeGCE), + ConnectionTrackingPolicy: noConnectionTrackingPolicy, + } + _, err := l4.backendPool.EnsureL4BackendService(backendParams, klog.TODO()) if err != nil { t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) } + backendParams.SessionAffinity = string(v1.ServiceAffinityNone) // Update the Internal Backend Service with a new ServiceAffinity - _, err = l4.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l4.NamespacedName, *network.DefaultNetwork(fakeGCE), noConnectionTrackingPolicy, klog.TODO()) + _, err = l4.backendPool.EnsureL4BackendService(backendParams, klog.TODO()) if err != nil { t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) } @@ -113,7 +124,8 @@ func TestEnsureInternalBackendServiceUpdates(t *testing.T) { if err != nil { t.Errorf("Failed to update backend service with new connection draining timeout - err %v", err) } - bs, err = l4.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l4.NamespacedName, *network.DefaultNetwork(fakeGCE), noConnectionTrackingPolicy, klog.TODO()) + // ensure the backend back to previous params + bs, err = l4.backendPool.EnsureL4BackendService(backendParams, klog.TODO()) if err != nil { t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) } @@ -292,7 +304,17 @@ func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) { if hcResult.Err != nil { t.Errorf("Failed to create healthcheck, err %v", hcResult.Err) } - _, err := l4.backendPool.EnsureL4BackendService(lbName, hcResult.HCLink, "TCP", string(l4.Service.Spec.SessionAffinity), string(cloud.SchemeInternal), l4.NamespacedName, *defaultNetwork, noConnectionTrackingPolicy, klog.TODO()) + backendParams := backends.L4BackendServiceParams{ + Name: lbName, + HealthCheckLink: 
hcResult.HCLink, + Protocol: "TCP", + SessionAffinity: string(svc.Spec.SessionAffinity), + Scheme: string(cloud.SchemeInternal), + NamespacedName: l4.NamespacedName, + NetworkInfo: defaultNetwork, + ConnectionTrackingPolicy: noConnectionTrackingPolicy, + } + _, err := l4.backendPool.EnsureL4BackendService(backendParams, klog.TODO()) if err != nil { t.Errorf("Failed to create backendservice, err %v", err) } diff --git a/pkg/loadbalancers/l4netlb.go b/pkg/loadbalancers/l4netlb.go index ffc1425136..28e72a37ca 100644 --- a/pkg/loadbalancers/l4netlb.go +++ b/pkg/loadbalancers/l4netlb.go @@ -311,10 +311,18 @@ func (l4netlb *L4NetLB) provideBackendService(syncResult *L4NetLBSyncResult, hcL protocol := utils.GetProtocol(servicePorts) connectionTrackingPolicy := l4netlb.connectionTrackingPolicy() - // TODO(cheungdavid): Create backend logger that contains backendName, - // backendVersion, and backendScope before passing to backendPool.EnsureL4BackendService(). - // See example in backendSyncer.ensureBackendService(). - bs, err := l4netlb.backendPool.EnsureL4BackendService(bsName, hcLink, string(protocol), string(l4netlb.Service.Spec.SessionAffinity), string(cloud.SchemeExternal), l4netlb.NamespacedName, *network.DefaultNetwork(l4netlb.cloud), connectionTrackingPolicy, l4netlb.svcLogger) + backendParams := backends.L4BackendServiceParams{ + Name: bsName, + HealthCheckLink: hcLink, + Protocol: string(protocol), + SessionAffinity: string(l4netlb.Service.Spec.SessionAffinity), + Scheme: string(cloud.SchemeExternal), + NamespacedName: l4netlb.NamespacedName, + NetworkInfo: network.DefaultNetwork(l4netlb.cloud), + ConnectionTrackingPolicy: connectionTrackingPolicy, + } + + bs, err := l4netlb.backendPool.EnsureL4BackendService(backendParams, l4netlb.svcLogger) if err != nil { if utils.IsUnsupportedFeatureError(err, strongSessionAffinityFeatureName) { syncResult.GCEResourceInError = annotations.BackendServiceResource diff --git a/pkg/neg/controller.go b/pkg/neg/controller.go index b59fd98ce0..f757374b9d 100644 --- a/pkg/neg/controller.go +++ b/pkg/neg/controller.go @@ -194,7 +194,9 @@ func NewController( podInformer.GetIndexer(), cloud, manager, + zoneGetter, enableDualStackNEG, + flags.F.EnableMultiSubnetCluster, logger, ) } else { @@ -310,8 +312,8 @@ func NewController( vmIpCandidateNodeCheck := zonegetter.CandidateAndUnreadyNodesFilter vmIpPortCandidateNodeCheck := zonegetter.CandidateNodesFilter - if zoneGetter.CheckNodeWithPredicate(oldNode, vmIpCandidateNodeCheck, logger) != zoneGetter.CheckNodeWithPredicate(currentNode, vmIpCandidateNodeCheck, logger) || - zoneGetter.CheckNodeWithPredicate(oldNode, vmIpPortCandidateNodeCheck, logger) != zoneGetter.CheckNodeWithPredicate(currentNode, vmIpPortCandidateNodeCheck, logger) { + if zoneGetter.IsNodeSelectedByFilter(oldNode, vmIpCandidateNodeCheck, logger) != zoneGetter.IsNodeSelectedByFilter(currentNode, vmIpCandidateNodeCheck, logger) || + zoneGetter.IsNodeSelectedByFilter(oldNode, vmIpPortCandidateNodeCheck, logger) != zoneGetter.IsNodeSelectedByFilter(currentNode, vmIpPortCandidateNodeCheck, logger) { logger.Info("Node has changed, enqueueing", "node", currentNode.Name) negController.enqueueNode(currentNode) } @@ -471,10 +473,6 @@ func (c *Controller) processService(key string) error { if service == nil { return fmt.Errorf("cannot convert to Service (%T)", obj) } - // TODO(cheungdavid): Remove this validation when single stack ipv6 endpoint is supported - if service.Spec.Type != apiv1.ServiceTypeLoadBalancer && isSingleStackIPv6Service(service) { - 
return fmt.Errorf("NEG is not supported for ipv6 only service (%T)", service) - } negUsage := metricscollector.NegServiceState{} svcPortInfoMap := make(negtypes.PortInfoMap) networkInfo, err := c.networkResolver.ServiceNetwork(service) @@ -514,6 +512,10 @@ func (c *Controller) processService(key string) error { } if len(svcPortInfoMap) != 0 { c.logger.V(2).Info("Syncing service", "service", key) + // TODO(cheungdavid): Remove this validation when single stack ipv6 endpoint is supported + if service.Spec.Type != apiv1.ServiceTypeLoadBalancer && isSingleStackIPv6Service(service) { + return fmt.Errorf("NEG is not supported for ipv6 only service (%T)", service) + } if err = c.syncNegStatusAnnotation(namespace, name, svcPortInfoMap); err != nil { return err } diff --git a/pkg/neg/controller_test.go b/pkg/neg/controller_test.go index 339a2ce133..ecc41e2cff 100644 --- a/pkg/neg/controller_test.go +++ b/pkg/neg/controller_test.go @@ -124,8 +124,8 @@ func newTestControllerWithParamsAndContext(kubeClient kubernetes.Interface, test kubeClient.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), &apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "ingress-controller-config-test"}, Data: map[string]string{"enable-asm": "true"}}, metav1.CreateOptions{}) } nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) return NewController( kubeClient, @@ -1560,7 +1560,7 @@ func TestServiceIPFamilies(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { controller := newTestController(fake.NewSimpleClientset()) defer controller.stop() - testService := newTestService(controller, false, []int32{}) + testService := newTestService(controller, false, []int32{80}) testService.Spec.Type = tc.serviceType testService.Spec.IPFamilies = tc.ipFamilies testService.Spec.IPFamilyPolicy = tc.ipFamilyPolicy @@ -1734,8 +1734,8 @@ func validateServiceAnnotationWithPortInfoMap(t *testing.T, svc *apiv1.Service, } nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) zones, _ := zoneGetter.ListZones(negtypes.NodeFilterForEndpointCalculatorMode(portInfoMap.EndpointsCalculatorMode()), klog.TODO()) if !sets.NewString(expectZones...).Equal(sets.NewString(zones...)) { t.Errorf("Unexpected zones listed by the predicate function, got %v, want %v", zones, expectZones) @@ -1815,8 +1815,8 @@ func validateServiceStateAnnotationExceptNames(t *testing.T, svc *apiv1.Service, } } nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) // This routine is called from tests verifying L7 NEGs. 
zones, _ := zoneGetter.ListZones(negtypes.NodeFilterForEndpointCalculatorMode(negtypes.L7Mode), klog.TODO()) diff --git a/pkg/neg/manager_test.go b/pkg/neg/manager_test.go index dde91670db..da4cc959b9 100644 --- a/pkg/neg/manager_test.go +++ b/pkg/neg/manager_test.go @@ -77,13 +77,15 @@ const ( labelValue1 = "v1" labelValue2 = "v2" negName1 = "neg1" + + defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" ) func NewTestSyncerManager(kubeClient kubernetes.Interface) (*syncerManager, *gce.Cloud) { testContext := negtypes.NewTestContextWithKubeClient(kubeClient) nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) manager := newSyncerManager( testContext.NegNamer, record.NewFakeRecorder(100), diff --git a/pkg/neg/readiness/poller_test.go b/pkg/neg/readiness/poller_test.go index d4407b231b..167dad238b 100644 --- a/pkg/neg/readiness/poller_test.go +++ b/pkg/neg/readiness/poller_test.go @@ -68,7 +68,7 @@ func (p *testPatcher) Eval(t *testing.T, pod string, negKey, bsKey *meta.Key) { } func newFakePoller() *poller { - reflector := newTestReadinessReflector(negtypes.NewTestContext()) + reflector := newTestReadinessReflector(negtypes.NewTestContext(), false) poller := reflector.poller poller.patcher = &testPatcher{} return poller diff --git a/pkg/neg/readiness/reflector.go b/pkg/neg/readiness/reflector.go index 9673c374d9..ba57f85cfc 100644 --- a/pkg/neg/readiness/reflector.go +++ b/pkg/neg/readiness/reflector.go @@ -34,6 +34,7 @@ import ( "k8s.io/ingress-gce/pkg/neg/metrics" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/neg/types/shared" + "k8s.io/ingress-gce/pkg/utils/zonegetter" "k8s.io/klog/v2" "k8s.io/utils/clock" ) @@ -74,10 +75,16 @@ type readinessReflector struct { queue workqueue.RateLimitingInterface + zoneGetter *zonegetter.ZoneGetter + + // If enabled, the reflector will mark pods that are from the non-default + // subnet as ready without processing. 
+ enableMultiSubnetCluster bool + logger klog.Logger } -func NewReadinessReflector(kubeClient kubernetes.Interface, podLister cache.Indexer, negCloud negtypes.NetworkEndpointGroupCloud, lookup NegLookup, enableDualStackNEG bool, logger klog.Logger) Reflector { +func NewReadinessReflector(kubeClient kubernetes.Interface, podLister cache.Indexer, negCloud negtypes.NetworkEndpointGroupCloud, lookup NegLookup, zoneGetter *zonegetter.ZoneGetter, enableDualStackNEG, enableMultiSubnetCluster bool, logger klog.Logger) Reflector { broadcaster := record.NewBroadcaster() broadcaster.StartLogging(klog.Infof) broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{ @@ -86,14 +93,16 @@ func NewReadinessReflector(kubeClient kubernetes.Interface, podLister cache.Inde recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "neg-readiness-reflector"}) logger = logger.WithName("ReadinessReflector") reflector := &readinessReflector{ - client: kubeClient, - podLister: podLister, - clock: clock.RealClock{}, - lookup: lookup, - eventBroadcaster: broadcaster, - eventRecorder: recorder, - queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - logger: logger, + client: kubeClient, + podLister: podLister, + clock: clock.RealClock{}, + lookup: lookup, + eventBroadcaster: broadcaster, + eventRecorder: recorder, + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + zoneGetter: zoneGetter, + enableMultiSubnetCluster: enableMultiSubnetCluster, + logger: logger, } poller := NewPoller(podLister, lookup, reflector, negCloud, enableDualStackNEG, logger) reflector.poller = poller @@ -198,6 +207,12 @@ func (r *readinessReflector) getExpectedNegCondition(pod *v1.Pod, neg, backendSe negs := r.lookup.ReadinessGateEnabledNegs(pod.Namespace, pod.Labels) // mark pod as ready if it belongs to no NEGs if len(negs) == 0 { + if r.enableMultiSubnetCluster && !r.zoneGetter.IsDefaultSubnetNode(pod.Spec.NodeName, r.logger) { + expectedCondition.Status = v1.ConditionTrue + expectedCondition.Reason = negReadyReason + expectedCondition.Message = fmt.Sprintf("Pod belongs to a node in non-default subnet. Marking condition %q to True.", shared.NegReadinessGate) + return expectedCondition + } expectedCondition.Status = v1.ConditionTrue expectedCondition.Reason = negReadyReason expectedCondition.Message = fmt.Sprintf("Pod does not belong to any NEG. 
Marking condition %q to True.", shared.NegReadinessGate) diff --git a/pkg/neg/readiness/reflector_test.go b/pkg/neg/readiness/reflector_test.go index 9f5f82a996..1528c8884c 100644 --- a/pkg/neg/readiness/reflector_test.go +++ b/pkg/neg/readiness/reflector_test.go @@ -28,10 +28,21 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/neg/types/shared" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/zonegetter" "k8s.io/klog/v2" clocktesting "k8s.io/utils/clock/testing" ) +const ( + defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" + + defaultTestSubnet = "default" + nonDefaultTestSubnet = "non-default" + + testServiceNamespace = "test-ns" +) + // fakeLookUp implements LookUp interface type fakeLookUp struct { readinessGateEnabled bool @@ -47,26 +58,51 @@ func (f *fakeLookUp) ReadinessGateEnabled(syncerKey negtypes.NegSyncerKey) bool return f.readinessGateEnabled } -func newTestReadinessReflector(testContext *negtypes.TestContext) *readinessReflector { - reflector := NewReadinessReflector(testContext.KubeClient, testContext.PodInformer.GetIndexer(), negtypes.NewAdapter(testContext.Cloud), &fakeLookUp{}, false, klog.TODO()) +func newTestReadinessReflector(testContext *negtypes.TestContext, enableMultiSubnetCluster bool) *readinessReflector { + fakeZoneGetter := zonegetter.NewFakeZoneGetter(testContext.NodeInformer, defaultTestSubnetURL, enableMultiSubnetCluster) + reflector := NewReadinessReflector( + testContext.KubeClient, + testContext.PodInformer.GetIndexer(), + negtypes.NewAdapter(testContext.Cloud), + &fakeLookUp{}, + fakeZoneGetter, + false, + enableMultiSubnetCluster, + klog.TODO(), + ) ret := reflector.(*readinessReflector) return ret } func TestSyncPod(t *testing.T) { + t.Parallel() fakeContext := negtypes.NewTestContext() - testReadinessReflector := newTestReadinessReflector(fakeContext) - client := fakeContext.KubeClient - podLister := testReadinessReflector.podLister - testlookUp := testReadinessReflector.lookup.(*fakeLookUp) - podName := "pod1" fakeClock := clocktesting.NewFakeClock(time.Now()) - testReadinessReflector.clock = fakeClock now := metav1.NewTime(fakeClock.Now()).Rfc3339Copy() + client := fakeContext.KubeClient + podLister := fakeContext.PodInformer.GetIndexer() + nodeLister := fakeContext.NodeInformer.GetIndexer() + nodeName := "instance1" - for _, tc := range []struct { + testReadinessReflector := newTestReadinessReflector(fakeContext, false) + testReadinessReflector.clock = fakeClock + testlookUp := testReadinessReflector.lookup.(*fakeLookUp) + + nodeLister.Add(&v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + Spec: v1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.1.0/24"}, + }, + }) + zonegetter.PopulateFakeNodeInformer(fakeContext.NodeInformer, false) + + testCases := []struct { desc string - mutateState func() + podName string + mutateState func(*fakeLookUp) inputKey string inputNeg *meta.Key inputBackendService *meta.Key @@ -74,49 +110,102 @@ func TestSyncPod(t *testing.T) { expectPod *v1.Pod }{ { - desc: "empty input", - mutateState: func() {}, + desc: "empty input", + mutateState: func(testlookUp *fakeLookUp) { + testlookUp.readinessGateEnabledNegs = []string{} + }, + expectExists: false, }, { desc: "no need to update when pod does not have neg readiness gate", - mutateState: func() { - pod := generatePod(testNamespace, podName, false, true, true) + 
mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, "pod1", false, true, true) podLister.Add(pod) - client.CoreV1().Pods(testNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + testlookUp.readinessGateEnabledNegs = []string{} }, - inputKey: keyFunc(testNamespace, podName), + inputKey: keyFunc(testServiceNamespace, "pod1"), inputNeg: nil, expectExists: true, - expectPod: generatePod(testNamespace, podName, false, true, true), + expectPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: "pod1", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: shared.NegReadinessGate, + Status: v1.ConditionTrue, + Reason: negReadyReason, + }, + }, + }, + }, }, { desc: "no need to update 2 when pod already has neg condition status == true", - mutateState: func() { - pod := generatePod(testNamespace, podName, true, true, true) - podLister.Update(pod) - client.CoreV1().Pods(testNamespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, "pod2", true, true, true) + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + testlookUp.readinessGateEnabledNegs = []string{} }, - inputKey: keyFunc(testNamespace, podName), + inputKey: keyFunc(testServiceNamespace, "pod2"), inputNeg: nil, expectExists: true, - expectPod: generatePod(testNamespace, podName, true, true, true), + expectPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: "pod2", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: v1.PodSpec{ + NodeName: nodeName, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: shared.NegReadinessGate}, + }, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: shared.NegReadinessGate, + Status: v1.ConditionTrue, + Reason: negReadyReason, + }, + }, + }, + }, }, { desc: "need to update pod but there is no Negs associated", - mutateState: func() { - pod := generatePod(testNamespace, podName, true, false, false) - podLister.Update(pod) - client.CoreV1().Pods(testNamespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, "pod3", true, false, false) + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + testlookUp.readinessGateEnabledNegs = []string{} }, - inputKey: keyFunc(testNamespace, podName), + inputKey: keyFunc(testServiceNamespace, "pod3"), inputNeg: nil, expectExists: true, expectPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: podName, + Namespace: testServiceNamespace, + Name: "pod3", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, }, Spec: v1.PodSpec{ + NodeName: nodeName, ReadinessGates: []v1.PodReadinessGate{ {ConditionType: shared.NegReadinessGate}, }, @@ -135,23 +224,27 @@ func TestSyncPod(t *testing.T) { }, { desc: "need to update pod: there is NEGs associated but pod is not healthy", - mutateState: func() { - pod := generatePod(testNamespace, podName, true, false, false) + mutateState: func(testlookUp *fakeLookUp) { 
+ pod := generatePod(testServiceNamespace, "pod4", true, false, false) pod.CreationTimestamp = now - podLister.Update(pod) - client.CoreV1().Pods(testNamespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) testlookUp.readinessGateEnabledNegs = []string{"neg1", "neg2"} }, - inputKey: keyFunc(testNamespace, podName), + inputKey: keyFunc(testServiceNamespace, "pod4"), inputNeg: nil, expectExists: true, expectPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: podName, + Namespace: testServiceNamespace, + Name: "pod4", CreationTimestamp: now, + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, }, Spec: v1.PodSpec{ + NodeName: nodeName, ReadinessGates: []v1.PodReadinessGate{ {ConditionType: shared.NegReadinessGate}, }, @@ -169,22 +262,26 @@ func TestSyncPod(t *testing.T) { }, { desc: "need to update pod: pod is not attached to health check", - mutateState: func() { - pod := generatePod(testNamespace, podName, true, false, false) - podLister.Update(pod) - client.CoreV1().Pods(testNamespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, "pod5", true, false, false) + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) testlookUp.readinessGateEnabledNegs = []string{"neg1", "neg2"} }, - inputKey: keyFunc(testNamespace, podName), + inputKey: keyFunc(testServiceNamespace, "pod5"), inputNeg: meta.ZonalKey("neg1", "zone1"), inputBackendService: nil, expectExists: true, expectPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: podName, + Namespace: testServiceNamespace, + Name: "pod5", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, }, Spec: v1.PodSpec{ + NodeName: nodeName, ReadinessGates: []v1.PodReadinessGate{ {ConditionType: shared.NegReadinessGate}, }, @@ -203,24 +300,28 @@ func TestSyncPod(t *testing.T) { }, { desc: "timeout waiting for endpoint to become healthy in NEGs", - mutateState: func() { - pod := generatePod(testNamespace, podName, true, false, false) + mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, "pod6", true, false, false) pod.CreationTimestamp = now - podLister.Update(pod) - client.CoreV1().Pods(testNamespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) testlookUp.readinessGateEnabledNegs = []string{"neg1", "neg2"} fakeClock.Step(unreadyTimeout) }, - inputKey: keyFunc(testNamespace, podName), + inputKey: keyFunc(testServiceNamespace, "pod6"), inputNeg: nil, expectExists: true, expectPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: podName, + Namespace: testServiceNamespace, + Name: "pod6", CreationTimestamp: now, + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, }, Spec: v1.PodSpec{ + NodeName: nodeName, ReadinessGates: []v1.PodReadinessGate{ {ConditionType: shared.NegReadinessGate}, }, @@ -239,22 +340,26 @@ func TestSyncPod(t *testing.T) { }, { desc: "need to update pod: pod is healthy in NEG ", - mutateState: func() { - pod := generatePod(testNamespace, podName, true, false, false) - podLister.Update(pod) - client.CoreV1().Pods(testNamespace).Update(context.TODO(), pod, 
metav1.UpdateOptions{}) + mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, "pod7", true, false, false) + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) testlookUp.readinessGateEnabledNegs = []string{"neg1", "neg2"} }, - inputKey: keyFunc(testNamespace, podName), + inputKey: keyFunc(testServiceNamespace, "pod7"), inputNeg: meta.ZonalKey("neg1", "zone1"), inputBackendService: meta.GlobalKey("k8s-backendservice"), expectExists: true, expectPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Namespace: testNamespace, - Name: podName, + Namespace: testServiceNamespace, + Name: "pod7", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, }, Spec: v1.PodSpec{ + NodeName: nodeName, ReadinessGates: []v1.PodReadinessGate{ {ConditionType: shared.NegReadinessGate}, }, @@ -271,24 +376,146 @@ func TestSyncPod(t *testing.T) { }, }, }, - } { - tc.mutateState() - err := testReadinessReflector.syncPod(tc.inputKey, tc.inputNeg, tc.inputBackendService) - if err != nil { - t.Errorf("For test case %q, expect err to be nil, but got %v", tc.desc, err) - } + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + for _, enableMultiSubnetCluster := range []bool{true, false} { + testReadinessReflector.enableMultiSubnetCluster = enableMultiSubnetCluster + tc.mutateState(testlookUp) + err := testReadinessReflector.syncPod(tc.inputKey, tc.inputNeg, tc.inputBackendService) + if err != nil { + t.Errorf("For test case %q with enableMultiSubnetCluster = %v, expect syncPod() return nil, but got %v", tc.desc, enableMultiSubnetCluster, err) + } + + if tc.expectExists { + pod, err := fakeContext.KubeClient.CoreV1().Pods(testServiceNamespace).Get(context.TODO(), tc.expectPod.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("For test case %q with enableMultiSubnetCluster = %v, expect GetPod() to return nil, but got %v", tc.desc, enableMultiSubnetCluster, err) + } + // ignore creation timestamp for comparison + pod.CreationTimestamp = tc.expectPod.CreationTimestamp + if !reflect.DeepEqual(pod, tc.expectPod) { + t.Errorf("For test case %q with enableMultiSubnetCluster = %v, expect pod to be %v, but got %v", tc.desc, enableMultiSubnetCluster, tc.expectPod, pod) + } + } + } + }) + } +} + +func TestSyncPodMultipleSubnets(t *testing.T) { + t.Parallel() + fakeContext := negtypes.NewTestContext() + fakeClock := clocktesting.NewFakeClock(time.Now()) + client := fakeContext.KubeClient + podLister := fakeContext.PodInformer.GetIndexer() + + testReadinessReflector := newTestReadinessReflector(fakeContext, true) + testReadinessReflector.clock = fakeClock + testlookUp := testReadinessReflector.lookup.(*fakeLookUp) + + zonegetter.PopulateFakeNodeInformer(fakeContext.NodeInformer, true) + + testCases := []struct { + desc string + podName string + mutateState func(*fakeLookUp) + inputKey string + inputNeg *meta.Key + inputBackendService *meta.Key + expectPod *v1.Pod + }{ + { + desc: "need to update pod: pod belongs to a node without PodCIDR", + mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, negtypes.TestNoPodCIDRPod, true, false, false) + pod.Spec.NodeName = negtypes.TestNoPodCIDRInstance + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + }, + inputKey: keyFunc(testServiceNamespace, negtypes.TestNoPodCIDRPod), + inputNeg: nil, + expectPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: testServiceNamespace, + Name: negtypes.TestNoPodCIDRPod, + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: v1.PodSpec{ + NodeName: negtypes.TestNoPodCIDRInstance, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: shared.NegReadinessGate}, + }, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: shared.NegReadinessGate, + Status: v1.ConditionTrue, + Reason: negReadyReason, + Message: fmt.Sprintf("Pod belongs to a node in non-default subnet. Marking condition %q to True.", shared.NegReadinessGate), + }, + }, + }, + }, + }, + { + desc: "need to update pod: pod belongs to a node in non-default subnet", + mutateState: func(testlookUp *fakeLookUp) { + pod := generatePod(testServiceNamespace, negtypes.TestNonDefaultSubnetPod, true, false, false) + pod.Spec.NodeName = negtypes.TestNonDefaultSubnetInstance + podLister.Add(pod) + client.CoreV1().Pods(testServiceNamespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + }, + inputKey: keyFunc(testServiceNamespace, negtypes.TestNonDefaultSubnetPod), + inputNeg: nil, + expectPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: negtypes.TestNonDefaultSubnetPod, + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: v1.PodSpec{ + NodeName: negtypes.TestNonDefaultSubnetInstance, + ReadinessGates: []v1.PodReadinessGate{ + {ConditionType: shared.NegReadinessGate}, + }, + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: shared.NegReadinessGate, + Status: v1.ConditionTrue, + Reason: negReadyReason, + Message: fmt.Sprintf("Pod belongs to a node in non-default subnet. Marking condition %q to True.", shared.NegReadinessGate), + }, + }, + }, + }, + }, + } - if tc.expectExists { - pod, err := fakeContext.KubeClient.CoreV1().Pods(testNamespace).Get(context.TODO(), tc.expectPod.Name, metav1.GetOptions{}) + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tc.mutateState(testlookUp) + err := testReadinessReflector.syncPod(tc.inputKey, tc.inputNeg, tc.inputBackendService) if err != nil { - t.Errorf("For test case %q, expect err to be nil, but got %v", tc.desc, err) + t.Errorf("For test case %q with multi-subnet cluster enabled, expect err to be nil, but got %v", tc.desc, err) + } + + pod, err := fakeContext.KubeClient.CoreV1().Pods(testServiceNamespace).Get(context.TODO(), tc.expectPod.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("For test case %q with multi-subnet cluster enabled, expect err to be nil, but got %v", tc.desc, err) } // ignore creation timestamp for comparison pod.CreationTimestamp = tc.expectPod.CreationTimestamp if !reflect.DeepEqual(pod, tc.expectPod) { - t.Errorf("For test case %q, expect pod to be %v, but got %v", tc.desc, tc.expectPod, pod) + t.Errorf("For test case %q with multi-subnet cluster enabled, expect pod to be %v, but got %v", tc.desc, tc.expectPod, pod) } - } - + }) } } diff --git a/pkg/neg/readiness/utils_test.go b/pkg/neg/readiness/utils_test.go index 0c8aaab4d7..15d89bb433 100644 --- a/pkg/neg/readiness/utils_test.go +++ b/pkg/neg/readiness/utils_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/kubernetes/fake" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/neg/types/shared" + "k8s.io/ingress-gce/pkg/utils" "k8s.io/klog/v2" ) @@ -579,6 +580,12 @@ func generatePod(namespace, name string, hasNegReadinessGate, hasNegCondition, n ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: name, + 
Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: v1.PodSpec{ + NodeName: "instance1", }, } if hasNegReadinessGate { diff --git a/pkg/neg/syncers/endpoints_calculator.go b/pkg/neg/syncers/endpoints_calculator.go index 77fbccb0da..07cf392476 100644 --- a/pkg/neg/syncers/endpoints_calculator.go +++ b/pkg/neg/syncers/endpoints_calculator.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/neg/metrics" "k8s.io/ingress-gce/pkg/neg/metrics/metricscollector" "k8s.io/ingress-gce/pkg/neg/types" @@ -93,7 +94,7 @@ func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.Endpoints metrics.PublishNegControllerErrorCountMetrics(err, true) continue } - if ok := l.zoneGetter.CheckNodeWithPredicate(node, zonegetter.CandidateAndUnreadyNodesFilter, l.logger); !ok { + if ok := l.zoneGetter.IsNodeSelectedByFilter(node, zonegetter.CandidateAndUnreadyNodesFilter, l.logger); !ok { l.logger.Info("Dropping Node from subset since it is not a valid LB candidate", "nodeName", node.Name) continue } @@ -126,7 +127,7 @@ func (l *LocalL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(eds []typ return subsetMap, podMap, err } -func (l *LocalL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error { +func (l *LocalL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, endpointsExcludedInCalculation int) error { // this should be a no-op for now return nil } @@ -196,37 +197,39 @@ func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpointsDegradedMode(eps []t return subsetMap, podMap, err } -func (l *ClusterL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error { +func (l *ClusterL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, endpointsExcludedInCalculation int) error { // this should be a no-op for now return nil } // L7EndpointsCalculator implements methods to calculate Network endpoints for VM_IP_PORT NEGs type L7EndpointsCalculator struct { - zoneGetter *zonegetter.ZoneGetter - servicePortName string - podLister cache.Indexer - nodeLister cache.Indexer - serviceLister cache.Indexer - syncerKey types.NegSyncerKey - networkEndpointType types.NetworkEndpointType - enableDualStackNEG bool - logger klog.Logger - syncMetricsCollector *metricscollector.SyncerMetrics + zoneGetter *zonegetter.ZoneGetter + servicePortName string + podLister cache.Indexer + nodeLister cache.Indexer + serviceLister cache.Indexer + syncerKey types.NegSyncerKey + networkEndpointType types.NetworkEndpointType + enableDualStackNEG bool + enableMultiSubnetCluster bool + logger klog.Logger + syncMetricsCollector *metricscollector.SyncerMetrics } func NewL7EndpointsCalculator(zoneGetter *zonegetter.ZoneGetter, podLister, nodeLister, serviceLister cache.Indexer, syncerKey types.NegSyncerKey, logger klog.Logger, enableDualStackNEG bool, syncMetricsCollector *metricscollector.SyncerMetrics) *L7EndpointsCalculator { return &L7EndpointsCalculator{ - zoneGetter: zoneGetter, - servicePortName: syncerKey.PortTuple.Name, - podLister: podLister, - nodeLister: nodeLister, - serviceLister: serviceLister, - syncerKey: syncerKey, - networkEndpointType: syncerKey.NegType, - 
enableDualStackNEG: enableDualStackNEG, - logger: logger.WithName("L7EndpointsCalculator"), - syncMetricsCollector: syncMetricsCollector, + zoneGetter: zoneGetter, + servicePortName: syncerKey.PortTuple.Name, + podLister: podLister, + nodeLister: nodeLister, + serviceLister: serviceLister, + syncerKey: syncerKey, + networkEndpointType: syncerKey.NegType, + enableDualStackNEG: enableDualStackNEG, + enableMultiSubnetCluster: flags.F.EnableMultiSubnetCluster, + logger: logger.WithName("L7EndpointsCalculator"), + syncMetricsCollector: syncMetricsCollector, } } @@ -237,16 +240,16 @@ func (l *L7EndpointsCalculator) Mode() types.EndpointsCalculatorMode { // CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs. func (l *L7EndpointsCalculator) CalculateEndpoints(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, int, error) { - result, err := toZoneNetworkEndpointMap(eds, l.zoneGetter, l.podLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG, l.logger) + result, err := toZoneNetworkEndpointMap(eds, l.zoneGetter, l.podLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG, l.enableMultiSubnetCluster, l.logger) if err == nil { // If current calculation ends up in error, we trigger and emit metrics in degraded mode. l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount) } - return result.NetworkEndpointSet, result.EndpointPodMap, result.EPCount[negtypes.Duplicate], err + return result.NetworkEndpointSet, result.EndpointPodMap, result.EPCount[negtypes.Duplicate] + result.EPCount[negtypes.NodeInNonDefaultSubnet], err } // CalculateEndpoints determines the endpoints in the NEGs based on the current service endpoints and the current NEGs. func (l *L7EndpointsCalculator) CalculateEndpointsDegradedMode(eds []types.EndpointsData, _ map[string]types.NetworkEndpointSet) (map[string]types.NetworkEndpointSet, types.EndpointPodMap, error) { - result := toZoneNetworkEndpointMapDegradedMode(eds, l.zoneGetter, l.podLister, l.nodeLister, l.serviceLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG, l.logger) + result := toZoneNetworkEndpointMapDegradedMode(eds, l.zoneGetter, l.podLister, l.nodeLister, l.serviceLister, l.servicePortName, l.networkEndpointType, l.enableDualStackNEG, l.enableMultiSubnetCluster, l.logger) l.syncMetricsCollector.UpdateSyncerEPMetrics(l.syncerKey, result.EPCount, result.EPSCount) return result.NetworkEndpointSet, result.EndpointPodMap, nil } @@ -263,12 +266,12 @@ func nodeMapToString(nodeMap map[string][]*v1.Node) string { // // For L7 Endpoint Calculator, it returns error if one of the two checks fails: // 1. The endpoint count from endpointData doesn't equal to the one from endpointPodMap: -// endpiontPodMap removes the duplicated endpoints, and dupCount stores the number of duplicated it removed +// endpiontPodMap removes the duplicated endpoints, and endpointsExcludedInCalculation stores the number of duplicated it removed // and we compare the endpoint counts with duplicates // 2. 
The endpoint count from endpointData or the one from endpointPodMap is 0 -func (l *L7EndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, dupCount int) error { +func (l *L7EndpointsCalculator) ValidateEndpoints(endpointData []types.EndpointsData, endpointPodMap types.EndpointPodMap, endpointsExcludedInCalculation int) error { // Endpoint count from EndpointPodMap - countFromPodMap := len(endpointPodMap) + dupCount + countFromPodMap := len(endpointPodMap) + endpointsExcludedInCalculation if countFromPodMap == 0 { l.logger.Info("Detected endpoint count from endpointPodMap going to zero", "endpointPodMap", endpointPodMap) return fmt.Errorf("%w: Detect endpoint count goes to zero", types.ErrEPCalculationCountZero) @@ -284,7 +287,7 @@ func (l *L7EndpointsCalculator) ValidateEndpoints(endpointData []types.Endpoints } if countFromEndpointData != countFromPodMap { - l.logger.Info("Detected error when comparing endpoint counts", "countFromEndpointData", countFromEndpointData, "countFromPodMap", countFromPodMap, "endpointData", endpointData, "endpointPodMap", endpointPodMap, "dupCount", dupCount) + l.logger.Info("Detected error when comparing endpoint counts", "countFromEndpointData", countFromEndpointData, "countFromPodMap", countFromPodMap, "endpointData", endpointData, "endpointPodMap", endpointPodMap, "endpointsExcludedInCalculation", endpointsExcludedInCalculation) return fmt.Errorf("%w: Detect endpoint mismatch, count from endpoint slice=%d, count after calculation=%d", types.ErrEPCountsDiffer, countFromEndpointData, countFromPodMap) } return nil diff --git a/pkg/neg/syncers/endpoints_calculator_test.go b/pkg/neg/syncers/endpoints_calculator_test.go index ff5ee5e124..98771a08d6 100644 --- a/pkg/neg/syncers/endpoints_calculator_test.go +++ b/pkg/neg/syncers/endpoints_calculator_test.go @@ -24,8 +24,8 @@ import ( "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" networkv1 "k8s.io/cloud-provider-gcp/crd/apis/network/v1" @@ -42,8 +42,8 @@ import ( func TestLocalGetEndpointSet(t *testing.T) { t.Parallel() nodeInformer := zonegetter.FakeNodeInformer() - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) - zonegetter.PopulateFakeNodeInformer(nodeInformer) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) defaultNetwork := network.NetworkInfo{IsDefault: true, K8sNetwork: "default"} testCases := []struct { @@ -172,8 +172,8 @@ func nodeInterfacesAnnotation(t *testing.T, network, ip string) string { func TestClusterGetEndpointSet(t *testing.T) { t.Parallel() nodeInformer := zonegetter.FakeNodeInformer() - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) - zonegetter.PopulateFakeNodeInformer(nodeInformer) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) defaultNetwork := network.NetworkInfo{IsDefault: true, K8sNetwork: "default"} testCases := []struct { desc string @@ -293,23 +293,18 @@ func TestClusterGetEndpointSet(t *testing.T) { func TestValidateEndpoints(t *testing.T) { t.Parallel() - - instance1 := testInstance1 - instance2 := testInstance2 - instance4 := testInstance4 testPortName := "" - testServiceNamespace := "namespace" + emptyNamedPort := "" + 
protocolTCP := v1.ProtocolTCP port80 := int32(80) - port81 := int32(81) - testIP1 := "10.100.1.1" - testIP2 := "10.100.1.2" - testIP3 := "10.100.2.2" - testIP4 := "10.100.4.1" - testPodName1 := "pod1" - testPodName2 := "pod2" - testPodName3 := "pod3" - testPodName4 := "pod4" - svcKey := fmt.Sprintf("%s/%s", testServiceName, testServiceNamespace) + + instance1 := negtypes.TestInstance1 + instance2 := negtypes.TestInstance2 + duplicatePodName := "pod2-duplicate" + noPodCIDRInstance := negtypes.TestNoPodCIDRInstance + noPodCIDRPod := negtypes.TestNoPodCIDRPod + nonDefaultSubnetInstance := negtypes.TestNonDefaultSubnetInstance + nonDefaultSubnetPod := negtypes.TestNonDefaultSubnetPod svcPort := negtypes.NegSyncerKey{ Namespace: testServiceNamespace, Name: testServiceName, @@ -322,591 +317,444 @@ func TestValidateEndpoints(t *testing.T) { NegName: testNegName, } - nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) testContext := negtypes.NewTestContext() podLister := testContext.PodInformer.GetIndexer() - nodeLister := testContext.NodeInformer.GetIndexer() - serviceLister := testContext.ServiceInformer.GetIndexer() - L7EndpointsCalculator := NewL7EndpointsCalculator(zoneGetter, podLister, nodeLister, serviceLister, svcPort, klog.TODO(), testContext.EnableDualStackNEG, metricscollector.FakeSyncerMetrics()) - L4LocalEndpointCalculator := NewLocalL4ILBEndpointsCalculator(listers.NewNodeLister(nodeLister), zoneGetter, svcKey, klog.TODO(), &network.NetworkInfo{}) - L4ClusterEndpointCalculator := NewClusterL4ILBEndpointsCalculator(listers.NewNodeLister(nodeLister), zoneGetter, svcKey, klog.TODO(), &network.NetworkInfo{}) + addPodsToLister(podLister, getDefaultEndpointSlices()) - testEndpointPodMap := map[negtypes.NetworkEndpoint]types.NamespacedName{ - { - IP: testIP1, - Port: "80", - Node: instance1, - }: { + // Add duplicate pod that shares the same IP and node as pod2. 
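+ // The duplicate shares the IP/port of an endpoint already in the map, so
+ // CalculateEndpoints drops it and reports it via endpointsExcludedInCalculation;
+ // ValidateEndpoints then compares len(endpointPodMap) + endpointsExcludedInCalculation
+ // against the address count from the endpoint slices. For example, with 3 addresses
+ // and 1 duplicate: countFromEndpointData = 3, len(endpointPodMap) = 2, excluded = 1.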
+ podLister.Add(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Namespace: testServiceNamespace, - Name: testPodName1, - }, - { - IP: testIP2, - Port: "80", - Node: instance1, - }: { - Namespace: testServiceNamespace, - Name: testPodName2, + Name: duplicatePodName, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, }, - { - IP: testIP3, - Port: "81", - Node: instance2, - }: { - Namespace: testServiceNamespace, - Name: testPodName3, + Spec: v1.PodSpec{ + NodeName: instance2, }, - { - IP: testIP4, - Port: "81", - Node: instance4, - }: { - Namespace: testServiceNamespace, - Name: testPodName4, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.100.1.2", + PodIPs: []v1.PodIP{ + {IP: "10.100.1.2"}, + }, }, - } + }) - testCases := []struct { - desc string - ec negtypes.NetworkEndpointsCalculator - endpointsData []negtypes.EndpointsData - endpointPodMap map[negtypes.NetworkEndpoint]types.NamespacedName - dupCount int - expect error - }{ - { - desc: "ValidateEndpoints for L4 local endpoint calculator", // we are adding this test to make sure the test is updated when the functionality is added - ec: L4LocalEndpointCalculator, - endpointsData: nil, // for now it is a no-op - endpointPodMap: nil, // L4 EndpointCalculators do not compute endpoints pod data - dupCount: 0, - expect: nil, - }, - { + nodeLister := testContext.NodeInformer.GetIndexer() + serviceLister := testContext.ServiceInformer.GetIndexer() + zonegetter.PopulateFakeNodeInformer(testContext.NodeInformer, true) + zoneGetter := zonegetter.NewFakeZoneGetter(testContext.NodeInformer, defaultTestSubnetURL, false) + L7EndpointsCalculator := NewL7EndpointsCalculator(zoneGetter, podLister, nodeLister, serviceLister, svcPort, klog.TODO(), testContext.EnableDualStackNEG, metricscollector.FakeSyncerMetrics()) - desc: "ValidateEndpoints for L4 cluster endpoint calculator", // we are adding this test to make sure the test is updated when the functionality is added - ec: L4ClusterEndpointCalculator, - endpointsData: nil, // for now it is a no-op - endpointPodMap: nil, // L4 EndpointCalculators do not compute endpoints pod data - dupCount: 0, - expect: nil, - }, + zoneGetterMSC := zonegetter.NewFakeZoneGetter(testContext.NodeInformer, defaultTestSubnetURL, true) + L7EndpointsCalculatorMSC := NewL7EndpointsCalculator(zoneGetterMSC, podLister, nodeLister, serviceLister, svcPort, klog.TODO(), testContext.EnableDualStackNEG, metricscollector.FakeSyncerMetrics()) + L7EndpointsCalculatorMSC.enableMultiSubnetCluster = true + L4LocalEndpointCalculator := NewLocalL4ILBEndpointsCalculator(listers.NewNodeLister(nodeLister), zoneGetter, fmt.Sprintf("%s/%s", testServiceName, testServiceNamespace), klog.TODO(), &network.NetworkInfo{}) + L4ClusterEndpointCalculator := NewClusterL4ILBEndpointsCalculator(listers.NewNodeLister(nodeLister), zoneGetter, fmt.Sprintf("%s/%s", testServiceName, testServiceNamespace), klog.TODO(), &network.NetworkInfo{}) + + l7TestEPS := []*discovery.EndpointSlice{ { - desc: "ValidateEndpoints for L7 Endpoint Calculator. 
Endpoint counts equal, endpointData has no duplicated endpoints", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port80, - }, - }, - Addresses: []negtypes.AddressData{ - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName1, - }, - NodeName: &instance1, - Addresses: []string{testIP1}, - Ready: true, - }, - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName2, - }, - NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, - }, + Name: "pod1", }, }, { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, + Name: "pod2", }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, - }, - }, - Addresses: []negtypes.AddressData{ - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName3, - }, - NodeName: &instance2, - Addresses: []string{testIP3}, - Ready: true, - }, - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName4, - }, - NodeName: &instance4, - Addresses: []string{testIP4}, - Ready: true, - }, - }, }, }, - endpointPodMap: testEndpointPodMap, - dupCount: 0, - expect: nil, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + } + noopMutation := func(ed []negtypes.EndpointsData, podMap negtypes.EndpointPodMap) ([]negtypes.EndpointsData, negtypes.EndpointPodMap) { + return ed, podMap + } + + testCases := []struct { + desc string + ec negtypes.NetworkEndpointsCalculator + ecMSC negtypes.NetworkEndpointsCalculator + testEndpointSlices []*discovery.EndpointSlice + currentMap map[string]negtypes.NetworkEndpointSet + // Use mutation to inject error into that we cannot trigger currently. + mutation func([]negtypes.EndpointsData, negtypes.EndpointPodMap) ([]negtypes.EndpointsData, negtypes.EndpointPodMap) + expect error + }{ + { + desc: "ValidateEndpoints for L4 local endpoint calculator", // we are adding this test to make sure the test is updated when the functionality is added + ec: L4LocalEndpointCalculator, + ecMSC: L4LocalEndpointCalculator, + testEndpointSlices: nil, // for now it is a no-op + mutation: noopMutation, + currentMap: nil, + expect: nil, + }, + { + desc: "ValidateEndpoints for L4 cluster endpoint calculator", // we are adding this test to make sure the test is updated when the functionality is added + ec: L4ClusterEndpointCalculator, + ecMSC: L4LocalEndpointCalculator, + testEndpointSlices: nil, // for now it is a no-op + mutation: noopMutation, + currentMap: nil, + expect: nil, }, { - desc: "ValidateEndpoints for L7 Endpoint Calculator. Endpoint counts equal, endpointData has duplicated endpoints", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ + desc: "ValidateEndpoints for L7 Endpoint Calculator. 
Endpoint counts equal, endpointData has no duplicated endpoints", + ec: L7EndpointsCalculator, + ecMSC: L7EndpointsCalculatorMSC, + testEndpointSlices: l7TestEPS, + mutation: noopMutation, + currentMap: nil, + expect: nil, + }, + { + desc: "ValidateEndpoints for L7 Endpoint Calculator. Endpoint counts equal, endpointData has one duplicated endpoint", + ec: L7EndpointsCalculator, + ecMSC: L7EndpointsCalculatorMSC, + testEndpointSlices: []*discovery.EndpointSlice{ { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port80, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, }, }, - Addresses: []negtypes.AddressData{ + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ { + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName1, + Name: "pod1", }, - NodeName: &instance1, - Addresses: []string{testIP1}, - Ready: true, }, { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName2, + Name: "pod2", }, - NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, - }, - }, - }, - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, }, - }, - Addresses: []negtypes.AddressData{ { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName2, - }, + Addresses: []string{"10.100.1.2"}, NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, - }, // this is a duplicated endpoint - { TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName3, + Name: duplicatePodName, }, - NodeName: &instance2, - Addresses: []string{testIP3}, - Ready: true, }, + }, + Ports: []discovery.EndpointPort{ { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName4, - }, - NodeName: &instance4, - Addresses: []string{testIP4}, - Ready: true, + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, }, }, }, }, - endpointPodMap: testEndpointPodMap, - dupCount: 1, - expect: nil, + mutation: noopMutation, + currentMap: nil, + expect: nil, }, { - desc: "ValidateEndpoints for L7 Endpoint Calculator. 
Endpoint counts not equal, endpointData has no duplicated endpoints", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port80, - }, - }, - Addresses: []negtypes.AddressData{ - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName2, - }, - NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, - }, - }, - }, - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, - }, - }, - Addresses: []negtypes.AddressData{ - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName3, - }, - NodeName: &instance2, - Addresses: []string{testIP3}, - Ready: true, - }, - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName4, - }, - NodeName: &instance4, - Addresses: []string{testIP4}, - Ready: true, - }, - }, - }, + desc: "ValidateEndpoints for L7 Endpoint Calculator. Endpoint counts not equal", + ec: L7EndpointsCalculator, + ecMSC: L7EndpointsCalculatorMSC, + testEndpointSlices: l7TestEPS, + mutation: func(endpointData []negtypes.EndpointsData, endpointPodMap negtypes.EndpointPodMap) ([]negtypes.EndpointsData, negtypes.EndpointPodMap) { + // Add one additional endpoint in endpointData + endpointData[0].Addresses = append(endpointData[0].Addresses, negtypes.AddressData{}) + return endpointData, endpointPodMap }, - endpointPodMap: testEndpointPodMap, - dupCount: 0, - expect: negtypes.ErrEPCountsDiffer, + currentMap: nil, + expect: negtypes.ErrEPCountsDiffer, }, { - desc: "ValidateEndpoints for L7 Endpoint Calculator. Endpoint counts not equal, endpointData has duplicated endpoints", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port80, - }, - }, - Addresses: []negtypes.AddressData{ - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName2, - }, - NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, - }, - }, - }, - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, - }, - }, - Addresses: []negtypes.AddressData{ - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName2, - }, - NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, - }, // this is a duplicated endpoint - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName3, - }, - NodeName: &instance2, - Addresses: []string{testIP3}, - Ready: true, - }, - { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName4, - }, - NodeName: &instance4, - Addresses: []string{testIP4}, - Ready: true, - }, - }, - }, + desc: "ValidateEndpoints for L7 Endpoint Calculator. 
EndpointData has zero endpoint", + ec: L7EndpointsCalculator, + ecMSC: L7EndpointsCalculatorMSC, + testEndpointSlices: l7TestEPS, + mutation: func(endpointData []negtypes.EndpointsData, endpointPodMap negtypes.EndpointPodMap) ([]negtypes.EndpointsData, negtypes.EndpointPodMap) { + for i := range endpointData { + endpointData[i].Addresses = []negtypes.AddressData{} + } + return endpointData, endpointPodMap }, - endpointPodMap: testEndpointPodMap, - dupCount: 1, - expect: negtypes.ErrEPCountsDiffer, + currentMap: nil, + expect: negtypes.ErrEPSEndpointCountZero, }, { - desc: "ValidateEndpoints for L7 Endpoint Calculator. EndpointData has zero endpoint", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port80, - }, - }, - Addresses: []negtypes.AddressData{}, - }, - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, - }, - }, - Addresses: []negtypes.AddressData{}, - }, + desc: "ValidateEndpoints for L7 Endpoint Calculator. EndpointPodMap has zero endpoint", + ec: L7EndpointsCalculator, + ecMSC: L7EndpointsCalculatorMSC, + testEndpointSlices: l7TestEPS, + mutation: func(endpointData []negtypes.EndpointsData, endpointPodMap negtypes.EndpointPodMap) ([]negtypes.EndpointsData, negtypes.EndpointPodMap) { + endpointPodMap = negtypes.EndpointPodMap{} + return endpointData, endpointPodMap }, - endpointPodMap: testEndpointPodMap, - dupCount: 0, - expect: negtypes.ErrEPSEndpointCountZero, + currentMap: nil, + expect: negtypes.ErrEPCalculationCountZero, }, { - desc: "ValidateEndpoints for L7 Endpoint Calculator. EndpointPodMap has zero endpoint", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ + desc: "ValidateEndpoints for L7 Endpoint Calculator. EndpointData and endpointPodMap both have zero endpoint", + ec: L7EndpointsCalculator, + ecMSC: L7EndpointsCalculatorMSC, + testEndpointSlices: l7TestEPS, + mutation: func(endpointData []negtypes.EndpointsData, endpointPodMap negtypes.EndpointPodMap) ([]negtypes.EndpointsData, negtypes.EndpointPodMap) { + for i := range endpointData { + endpointData[i].Addresses = []negtypes.AddressData{} + } + endpointPodMap = negtypes.EndpointPodMap{} + return endpointData, endpointPodMap + }, + currentMap: nil, + expect: negtypes.ErrEPCalculationCountZero, // PodMap count is check and returned first, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + endpointData := negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices) + _, endpointPodMap, endpointsExcludedInCalculation, err := tc.ec.CalculateEndpoints(endpointData, tc.currentMap) + if err != nil { + t.Errorf("Received error when calculating endpoint: %v", err) + } + endpointData, endpointPodMap = tc.mutation(endpointData, endpointPodMap) + if got := tc.ec.ValidateEndpoints(endpointData, endpointPodMap, endpointsExcludedInCalculation); !errors.Is(got, tc.expect) { + t.Errorf("ValidateEndpoints() = %v, expected %v", got, tc.expect) + } + + // Run tests with multi-subnet cluster enabled. 
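+ // With multi-subnet cluster enabled, endpoints on nodes outside the default subnet
+ // are skipped and counted in endpointsExcludedInCalculation, so the shared test
+ // cases above are expected to produce the same results for the MSC calculator.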
+ endpointData = negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices) + _, endpointPodMap, endpointsExcludedInCalculation, err = tc.ecMSC.CalculateEndpoints(endpointData, tc.currentMap) + if err != nil { + t.Errorf("With multi-subnet cluster enabled, received error when calculating endpoint: %v", err) + } + endpointData, endpointPodMap = tc.mutation(endpointData, endpointPodMap) + if got := tc.ecMSC.ValidateEndpoints(endpointData, endpointPodMap, endpointsExcludedInCalculation); !errors.Is(got, tc.expect) { + t.Errorf("With multi-subnet cluster enabled, ValidateEndpoints() = %v, expected %v", got, tc.expect) + } + }) + } + + // Add noPodCIDRPod that corresponds to noPodCIDRInstance. + podLister.Add(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: noPodCIDRPod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, + }, + Spec: v1.PodSpec{ + NodeName: noPodCIDRInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.101.3.1", + PodIPs: []v1.PodIP{ + {IP: "10.101.3.1"}, + }, + }, + }) + podLister.Add(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: nonDefaultSubnetPod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, + }, + Spec: v1.PodSpec{ + NodeName: nonDefaultSubnetInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.200.1.1", + PodIPs: []v1.PodIP{ + {IP: "10.200.1.1"}, + }, + }, + }) + + podLister.Add(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: nonDefaultSubnetPod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, + }, + Spec: v1.PodSpec{ + NodeName: nonDefaultSubnetInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.200.1.1", + PodIPs: []v1.PodIP{ + {IP: "10.200.1.1"}, + }, + }, + }) + + mscTestCases := []struct { + desc string + ecMSC negtypes.NetworkEndpointsCalculator + testEndpointSlices []*discovery.EndpointSlice + currentMap map[string]negtypes.NetworkEndpointSet + // Use mutation to inject error into that we cannot trigger currently. + mutation func([]negtypes.EndpointsData, negtypes.EndpointPodMap) ([]negtypes.EndpointsData, negtypes.EndpointPodMap) + expect error + }{ + { + desc: "ValidateEndpoints for L7 Endpoint Calculator. 
Endpoint counts equal, endpointData has an endpoint corresponds to node without PodCIDR", + ecMSC: L7EndpointsCalculator, + testEndpointSlices: []*discovery.EndpointSlice{ { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port80, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, }, }, - Addresses: []negtypes.AddressData{ + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName1, - }, + Addresses: []string{"10.100.1.1"}, NodeName: &instance1, - Addresses: []string{testIP1}, - Ready: true, - }, - { TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName2, + Name: "pod1", }, - NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, }, - }, - }, - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, - }, - }, - Addresses: []negtypes.AddressData{ { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName3, + Name: "pod2", }, - NodeName: &instance2, - Addresses: []string{testIP3}, - Ready: true, }, { + Addresses: []string{"10.101.1.3"}, + NodeName: &noPodCIDRInstance, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName4, + Name: noPodCIDRPod, }, - NodeName: &instance4, - Addresses: []string{testIP4}, - Ready: true, }, }, - }, - }, - endpointPodMap: map[negtypes.NetworkEndpoint]types.NamespacedName{}, - dupCount: 0, - expect: negtypes.ErrEPCalculationCountZero, - }, - { - desc: "ValidateEndpoints for L7 Endpoint Calculator. EndpointData and endpointPodMap both have zero endpoint", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ + Ports: []discovery.EndpointPort{ { - Name: testPortName, - Port: port80, + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, }, }, - Addresses: []negtypes.AddressData{}, - }, - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, - }, - }, - Addresses: []negtypes.AddressData{}, }, }, - endpointPodMap: map[negtypes.NetworkEndpoint]types.NamespacedName{}, - dupCount: 0, - expect: negtypes.ErrEPCalculationCountZero, // PodMap count is check and returned first, + mutation: noopMutation, + currentMap: nil, + expect: nil, }, { - desc: "ValidateEndpoints for L7 Endpoint Calculator. EndpointData and endpointPodMap both have non-zero endpoints", - ec: L7EndpointsCalculator, - endpointsData: []negtypes.EndpointsData{ + desc: "ValidateEndpoints for L7 Endpoint Calculator. 
Endpoint counts equal, endpointData has non-default subnet endpoint", + ecMSC: L7EndpointsCalculatorMSC, + testEndpointSlices: []*discovery.EndpointSlice{ { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-1", + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port80, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, }, }, - Addresses: []negtypes.AddressData{ + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ { + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName1, + Name: "pod1", }, - NodeName: &instance1, - Addresses: []string{testIP1}, - Ready: true, }, { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName2, + Name: "pod2", }, - NodeName: &instance1, - Addresses: []string{testIP2}, - Ready: true, - }, - }, - }, - { - Meta: &metav1.ObjectMeta{ - Name: testServiceName + "-2", - Namespace: testServiceNamespace, - }, - Ports: []negtypes.PortData{ - { - Name: testPortName, - Port: port81, }, - }, - Addresses: []negtypes.AddressData{ { + Addresses: []string{"10.200.1.1"}, + NodeName: &nonDefaultSubnetInstance, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: testPodName3, + Name: nonDefaultSubnetPod, }, - NodeName: &instance2, - Addresses: []string{testIP3}, - Ready: true, }, + }, + Ports: []discovery.EndpointPort{ { - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: testPodName4, - }, - NodeName: &instance4, - Addresses: []string{testIP4}, - Ready: true, + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, }, }, }, }, - endpointPodMap: testEndpointPodMap, - dupCount: 0, - expect: nil, + mutation: noopMutation, + currentMap: nil, + expect: nil, }, } - for _, tc := range testCases { + for _, tc := range mscTestCases { t.Run(tc.desc, func(t *testing.T) { - if got := tc.ec.ValidateEndpoints(tc.endpointsData, tc.endpointPodMap, tc.dupCount); !errors.Is(got, tc.expect) { - t.Errorf("ValidateEndpoints() = %t, expected %t", got, tc.expect) + endpointData := negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices) + _, endpointPodMap, endpointsExcludedInCalculation, err := tc.ecMSC.CalculateEndpoints(endpointData, tc.currentMap) + if err != nil { + t.Errorf("With multi-subnet cluster enabled, received error when calculating endpoint: %v", err) + } + endpointData, endpointPodMap = tc.mutation(endpointData, endpointPodMap) + if got := tc.ecMSC.ValidateEndpoints(endpointData, endpointPodMap, endpointsExcludedInCalculation); !errors.Is(got, tc.expect) { + t.Errorf("With multi-subnet cluster enabled, ValidateEndpoints() = %v, expected %v", got, tc.expect) } }) } diff --git a/pkg/neg/syncers/transaction.go b/pkg/neg/syncers/transaction.go index 01fe13ac13..31737683f6 100644 --- a/pkg/neg/syncers/transaction.go +++ b/pkg/neg/syncers/transaction.go @@ -326,7 +326,7 @@ func (s *transactionSyncer) syncInternalImpl() error { // not enabled, so they would always be empty and reset error state. 
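+	// Record the ExitDegradedMode event only when the syncer was actually in error
+	// state, so that healthy resyncs do not emit the event on every pass.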
if len(notInDegraded) == 0 && len(onlyInDegraded) == 0 { s.logger.Info("Exit degraded mode") - if s.enableDegradedMode { + if s.enableDegradedMode && s.inErrorState() { s.recordEvent(apiv1.EventTypeNormal, "ExitDegradedMode", fmt.Sprintf("NEG %s is no longer in degraded mode", s.NegSyncerKey.String())) } s.resetErrorState() @@ -379,12 +379,12 @@ func (s *transactionSyncer) getEndpointsCalculation( endpointsData []negtypes.EndpointsData, currentMap map[string]negtypes.NetworkEndpointSet, ) (map[string]negtypes.NetworkEndpointSet, negtypes.EndpointPodMap, error) { - targetMap, endpointPodMap, dupCount, err := s.endpointsCalculator.CalculateEndpoints(endpointsData, currentMap) + targetMap, endpointPodMap, endpointsExcludedInCalculation, err := s.endpointsCalculator.CalculateEndpoints(endpointsData, currentMap) if err != nil { return nil, nil, err } if s.enableDegradedMode { - err = s.endpointsCalculator.ValidateEndpoints(endpointsData, endpointPodMap, dupCount) + err = s.endpointsCalculator.ValidateEndpoints(endpointsData, endpointPodMap, endpointsExcludedInCalculation) if err != nil { return nil, nil, err } diff --git a/pkg/neg/syncers/transaction_test.go b/pkg/neg/syncers/transaction_test.go index 8c5e36bbf5..260bdd6fc8 100644 --- a/pkg/neg/syncers/transaction_test.go +++ b/pkg/neg/syncers/transaction_test.go @@ -1531,8 +1531,8 @@ func TestIsZoneChange(t *testing.T) { func TestUnknownNodes(t *testing.T) { nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) testNetwork := cloud.ResourcePath("network", &meta.Key{Name: "test-network"}) testSubnetwork := cloud.ResourcePath("subnetwork", &meta.Key{Name: "test-subnetwork"}) fakeCloud := negtypes.NewFakeNetworkEndpointGroupCloud(testSubnetwork, testNetwork) @@ -1628,8 +1628,8 @@ func TestUnknownNodes(t *testing.T) { func TestEnableDegradedMode(t *testing.T) { t.Parallel() nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) negtypes.MockNetworkEndpointAPIs(fakeGCE) fakeCloud := negtypes.NewAdapter(fakeGCE) @@ -2261,8 +2261,8 @@ func newTestTransactionSyncer(fakeGCE negtypes.NetworkEndpointGroupCloud, negTyp // TODO(freehan): use real readiness reflector reflector := &readiness.NoopReflector{} nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) negsyncer := NewTransactionSyncer(svcPort, record.NewFakeRecorder(100), diff --git a/pkg/neg/syncers/utils.go b/pkg/neg/syncers/utils.go index 8f25ab3c44..a885ce6e1c 100644 --- a/pkg/neg/syncers/utils.go +++ b/pkg/neg/syncers/utils.go @@ -238,7 +238,7 @@ type ZoneNetworkEndpointMapResult struct { } // toZoneNetworkEndpointMap translates addresses in endpoints object into zone and endpoints map, and also return the count for duplicated endpoints -func toZoneNetworkEndpointMap(eds []negtypes.EndpointsData, 
zoneGetter *zonegetter.ZoneGetter, podLister cache.Indexer, servicePortName string, networkEndpointType negtypes.NetworkEndpointType, enableDualStackNEG bool, logger klog.Logger) (ZoneNetworkEndpointMapResult, error) { +func toZoneNetworkEndpointMap(eds []negtypes.EndpointsData, zoneGetter *zonegetter.ZoneGetter, podLister cache.Indexer, servicePortName string, networkEndpointType negtypes.NetworkEndpointType, enableDualStackNEG, enableMultiSubnetCluster bool, logger klog.Logger) (ZoneNetworkEndpointMapResult, error) { zoneNetworkEndpointMap := map[string]negtypes.NetworkEndpointSet{} networkEndpointPodMap := negtypes.EndpointPodMap{} ipsForPod := ipsForPod(eds) @@ -285,8 +285,13 @@ func toZoneNetworkEndpointMap(eds []negtypes.EndpointsData, zoneGetter *zonegett globalEPCount[negtypes.Total] += 1 zone, _, getZoneErr := getEndpointZone(endpointAddress, zoneGetter, logger) if getZoneErr != nil { - epLogger.Error(getZoneErr, "Detected unexpected error when getting zone for endpoint") metrics.PublishNegControllerErrorCountMetrics(getZoneErr, true) + if enableMultiSubnetCluster && errors.Is(getZoneErr, zonegetter.ErrNodeNotInDefaultSubnet) { + epLogger.Error(getZoneErr, "Detected endpoint not from default subnet. Skipping") + localEPCount[negtypes.NodeInNonDefaultSubnet]++ + continue + } + epLogger.Error(getZoneErr, "Detected unexpected error when getting zone for endpoint") return ZoneNetworkEndpointMapResult{}, fmt.Errorf("unexpected error when getting zone for endpoint %q in endpoint slice %s/%s: %w", endpointAddress.Addresses, ed.Meta.Namespace, ed.Meta.Name, getZoneErr) } @@ -408,7 +413,7 @@ func getEndpointPod(endpointAddress negtypes.AddressData, podLister cache.Indexe // toZoneNetworkEndpointMap translates addresses in endpoints object into zone and endpoints map, and also return the count for duplicated endpoints // we will not raise error in degraded mode for misconfigured endpoints, instead they will be filtered directly -func toZoneNetworkEndpointMapDegradedMode(eds []negtypes.EndpointsData, zoneGetter *zonegetter.ZoneGetter, podLister, nodeLister, serviceLister cache.Indexer, servicePortName string, networkEndpointType negtypes.NetworkEndpointType, enableDualStackNEG bool, logger klog.Logger) ZoneNetworkEndpointMapResult { +func toZoneNetworkEndpointMapDegradedMode(eds []negtypes.EndpointsData, zoneGetter *zonegetter.ZoneGetter, podLister, nodeLister, serviceLister cache.Indexer, servicePortName string, networkEndpointType negtypes.NetworkEndpointType, enableDualStackNEG, enableMultiSubnetCluster bool, logger klog.Logger) ZoneNetworkEndpointMapResult { zoneNetworkEndpointMap := map[string]negtypes.NetworkEndpointSet{} networkEndpointPodMap := negtypes.EndpointPodMap{} ipsForPod := ipsForPod(eds) @@ -458,8 +463,13 @@ func toZoneNetworkEndpointMapDegradedMode(eds []negtypes.EndpointsData, zoneGett } zone, getZoneErr := zoneGetter.ZoneForNode(nodeName, logger) if getZoneErr != nil { - epLogger.Error(getZoneErr, "Endpoint's corresponding node does not have valid zone information, skipping", "nodeName", nodeName) metrics.PublishNegControllerErrorCountMetrics(getZoneErr, true) + if enableMultiSubnetCluster && errors.Is(getZoneErr, zonegetter.ErrNodeNotInDefaultSubnet) { + epLogger.Error(getZoneErr, "Detected endpoint not from default subnet. 
Skipping", "nodeName", nodeName) + localEPCount[negtypes.NodeInNonDefaultSubnet]++ + continue + } + epLogger.Error(getZoneErr, "Endpoint's corresponding node does not have valid zone information, skipping", "nodeName", nodeName) localEPCount[negtypes.NodeNotFound]++ continue } @@ -619,7 +629,7 @@ func podContainsEndpointAddress(networkEndpoint negtypes.NetworkEndpoint, pod *a } } if matching != len(endpointIPs) { - return fmt.Errorf("%w: endpoint has at least one IP %v that does not match its pod's IP(s)", negtypes.ErrEPIPNotFromPod, endpointIPs) + return fmt.Errorf("%w: endpoint has at least one IP %v that does not match its pod's IP(s) %v", negtypes.ErrEPIPNotFromPod, endpointIPs, pod.Status.PodIPs) } return nil } diff --git a/pkg/neg/syncers/utils_test.go b/pkg/neg/syncers/utils_test.go index 9a90ea7536..4efd46c9cf 100644 --- a/pkg/neg/syncers/utils_test.go +++ b/pkg/neg/syncers/utils_test.go @@ -46,6 +46,8 @@ import ( "k8s.io/klog/v2" ) +const defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" + func TestEncodeDecodeEndpoint(t *testing.T) { ip := "10.0.0.10" instance := "somehost" @@ -492,8 +494,8 @@ func TestEnsureNetworkEndpointGroup(t *testing.T) { func TestToZoneNetworkEndpointMap(t *testing.T) { t.Parallel() nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) podLister := negtypes.NewTestContext().PodInformer.GetIndexer() testEndpointSlice := getDefaultEndpointSlices() addPodsToLister(podLister, testEndpointSlice) @@ -617,7 +619,7 @@ func TestToZoneNetworkEndpointMap(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - gotResult, err := toZoneNetworkEndpointMap(negtypes.EndpointsDataFromEndpointSlices(getDefaultEndpointSlices()), zoneGetter, podLister, tc.portName, tc.networkEndpointType, tc.enableDualStackNEG, klog.TODO()) + gotResult, err := toZoneNetworkEndpointMap(negtypes.EndpointsDataFromEndpointSlices(getDefaultEndpointSlices()), zoneGetter, podLister, tc.portName, tc.networkEndpointType, tc.enableDualStackNEG, false, klog.TODO()) if err != nil { t.Errorf("toZoneNetworkEndpointMap() = err %v, want no error", err) } @@ -744,467 +746,234 @@ func TestIpsForPod(t *testing.T) { } } -// TestValidateEndpointFields validates if toZoneNetworkEndpointMap -// returns correct type of error with invalid endpoint information -func TestValidateEndpointFields(t *testing.T) { - t.Parallel() +func TestRetrieveExistingZoneNetworkEndpointMap(t *testing.T) { nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) - podLister := negtypes.NewTestContext().PodInformer.GetIndexer() - addPodsToLister(podLister, getDefaultEndpointSlices()) - - instance1 := negtypes.TestInstance1 - instance3 := negtypes.TestInstance3 - instanceNotExist := "non-existent-node" - emptyZoneInstance := "empty-zone-instance" - zonegetter.AddFakeNodes(zoneGetter, "", emptyZoneInstance) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + negCloud := negtypes.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network") + negName := "test-neg-name" + irrelevantNegName := "irrelevant" + 
testIP1 := "1.2.3.4" + testIP2 := "1.2.3.5" + testIP3 := "1.2.3.6" + testIP4 := "1.2.3.7" + testIP5 := "1.2.3.8" + testIP6 := "1.2.3.9" + testIP7 := "1.2.3.10" + testPort := int64(80) - emptyNamedPort := "" - emptyNodeName := "" - port80 := int32(80) - protocolTCP := v1.ProtocolTCP + endpoint1 := negtypes.NetworkEndpoint{IP: testIP1, Node: negtypes.TestInstance1, Port: strconv.Itoa(int(testPort))} + endpoint2 := negtypes.NetworkEndpoint{IP: testIP2, Node: negtypes.TestInstance2, Port: strconv.Itoa(int(testPort))} + endpoint3 := negtypes.NetworkEndpoint{IP: testIP3, Node: negtypes.TestInstance3, Port: strconv.Itoa(int(testPort))} + endpoint4 := negtypes.NetworkEndpoint{IP: testIP4, Node: negtypes.TestInstance4, Port: strconv.Itoa(int(testPort))} + endpoint5 := negtypes.NetworkEndpoint{IP: testIP5, Node: negtypes.TestUnreadyInstance1, Port: strconv.Itoa(int(testPort))} + endpoint6 := negtypes.NetworkEndpoint{IP: testIP6, Node: negtypes.TestUpgradeInstance1, Port: strconv.Itoa(int(testPort))} + endpoint7 := negtypes.NetworkEndpoint{IP: testIP7, Node: negtypes.TestUpgradeInstance2, Port: strconv.Itoa(int(testPort))} testCases := []struct { - desc string - testEndpointSlice []*discovery.EndpointSlice - expectSets map[string]negtypes.NetworkEndpointSet - expectMap negtypes.EndpointPodMap - expectErr error + desc string + mutate func(cloud negtypes.NetworkEndpointGroupCloud) + mode negtypes.EndpointsCalculatorMode + expect map[string]negtypes.NetworkEndpointSet + expectAnnotationMap labels.EndpointPodLabelMap + expectErr bool }{ { - desc: "endpoints no missing or invalid fields", - testEndpointSlice: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: &instance1, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod1", - }, - }, - { - Addresses: []string{"10.100.3.1"}, - NodeName: &instance3, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod4", - }, - }, - }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, - }, - }, - }, + desc: "neg does not exist", + mutate: func(cloud negtypes.NetworkEndpointGroupCloud) {}, + expectErr: true, + }, + { + desc: "neg only exists in one of the zone", + mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { + cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: testNegName, Version: meta.VersionGA}, negtypes.TestZone1, klog.TODO()) }, - expectSets: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet( - networkEndpointFromEncodedEndpoint("10.100.1.1||instance1||80")), - negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - networkEndpointFromEncodedEndpoint("10.100.3.1||instance3||80")), + expectErr: true, + }, + { + desc: "neg only exists in one of the zone plus irrelevant negs", + mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { + cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: irrelevantNegName, Version: meta.VersionGA}, negtypes.TestZone2, klog.TODO()) }, - expectMap: negtypes.EndpointPodMap{ - networkEndpointFromEncodedEndpoint("10.100.1.1||instance1||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, - 
networkEndpointFromEncodedEndpoint("10.100.3.1||instance3||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod4"}, + expectErr: true, + }, + { + desc: "empty negs exists in all 3 zones", + mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { + cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: testNegName, Version: meta.VersionGA}, negtypes.TestZone2, klog.TODO()) + cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: testNegName, Version: meta.VersionGA}, negtypes.TestZone4, klog.TODO()) }, - expectErr: nil, + expect: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet(), + negtypes.TestZone2: negtypes.NewNetworkEndpointSet(), + negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), + }, + expectAnnotationMap: labels.EndpointPodLabelMap{}, + expectErr: false, }, { - desc: "include one endpoint that has missing nodeName", - testEndpointSlice: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: nil, // endpoint with missing nodeName - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod1", - }, - }, - { - Addresses: []string{"10.100.3.1"}, - NodeName: &instance3, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod4", - }, - }, - }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, + desc: "one empty and two non-empty negs", + mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { + cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone1, []*composite.NetworkEndpoint{ + { + Instance: negtypes.TestInstance1, + IpAddress: testIP1, + Port: testPort, + Annotations: map[string]string{ + "foo": "bar", }, }, + }, meta.VersionGA, klog.TODO()) + }, + expect: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet(endpoint1), + negtypes.TestZone2: negtypes.NewNetworkEndpointSet(), + negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), + }, + expectAnnotationMap: labels.EndpointPodLabelMap{ + endpoint1: labels.PodLabelMap{ + "foo": "bar", }, }, - expectSets: nil, - expectMap: nil, - expectErr: negtypes.ErrEPNodeMissing, + expectErr: false, }, { - desc: "include one endpoint that has empty nodeName", - testEndpointSlice: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: &emptyNodeName, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod1", - }, - }, - { - Addresses: []string{"10.100.3.1"}, - NodeName: &instance3, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod4", - }, - }, - }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, + desc: "one neg with multiple endpoints", + mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { + cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone1, []*composite.NetworkEndpoint{ + { + Instance: negtypes.TestInstance2, + IpAddress: testIP2, + 
Port: testPort, + Annotations: map[string]string{ + "foo": "bar", }, }, + }, meta.VersionGA, klog.TODO()) + }, + expect: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + endpoint1, + endpoint2, + ), + negtypes.TestZone2: negtypes.NewNetworkEndpointSet(), + negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), + }, + expectAnnotationMap: labels.EndpointPodLabelMap{ + endpoint1: labels.PodLabelMap{ + "foo": "bar", + }, + endpoint2: labels.PodLabelMap{ + "foo": "bar", }, }, - expectSets: nil, - expectMap: nil, - expectErr: negtypes.ErrEPNodeMissing, + expectErr: false, }, { - desc: "include one endpoint that has missing pod", - testEndpointSlice: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: &instance1, - TargetRef: nil, - }, - { - Addresses: []string{"10.100.3.1"}, - NodeName: &instance3, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod4", - }, + desc: "2 negs with multiple endpoints", + mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { + cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone2, []*composite.NetworkEndpoint{ + { + Instance: negtypes.TestInstance3, + IpAddress: testIP3, + Port: testPort, + Annotations: map[string]string{ + "foo": "bar", }, }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, + { + Instance: negtypes.TestInstance4, + IpAddress: testIP4, + Port: testPort, + Annotations: map[string]string{ + "foo": "bar", }, }, - }, + }, meta.VersionGA, klog.TODO()) }, - expectSets: map[string]negtypes.NetworkEndpointSet{ + expect: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + endpoint1, + endpoint2, + ), negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - networkEndpointFromEncodedEndpoint("10.100.3.1||instance3||80")), - }, - expectMap: negtypes.EndpointPodMap{ - networkEndpointFromEncodedEndpoint("10.100.3.1||instance3||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod4"}, + endpoint3, + endpoint4, + ), + negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), }, - expectErr: nil, - }, - { - desc: "include one endpoint that does not correspond to an existing pod", - testEndpointSlice: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: &instance1, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "foo", // this is a non-existing pod - }, - }, - { - Addresses: []string{"10.100.3.1"}, - NodeName: &instance3, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod4", - }, - }, - }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, - }, - }, + expectAnnotationMap: labels.EndpointPodLabelMap{ + endpoint1: labels.PodLabelMap{ + "foo": "bar", }, - }, - expectSets: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - 
networkEndpointFromEncodedEndpoint("10.100.3.1||instance3||80")), - }, - expectMap: negtypes.EndpointPodMap{ - networkEndpointFromEncodedEndpoint("10.100.3.1||instance3||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod4"}, - }, - expectErr: nil, - }, - { - desc: "include one endpoint that does not correspond to an existing node", - testEndpointSlice: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: &instanceNotExist, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod1", - }, - }, - { - Addresses: []string{"10.100.3.1"}, - NodeName: &instance3, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod4", - }, - }, - }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, - }, - }, + endpoint2: labels.PodLabelMap{ + "foo": "bar", }, - }, - expectSets: nil, - expectMap: nil, - expectErr: negtypes.ErrEPNodeNotFound, - }, - { - desc: "include one endpoint that corresponds to an empty zone", - testEndpointSlice: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: &emptyZoneInstance, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod1", - }, - }, - { - Addresses: []string{"10.100.3.1"}, - NodeName: &instance3, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod4", - }, - }, - }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, - }, - }, + endpoint3: labels.PodLabelMap{ + "foo": "bar", + }, + endpoint4: labels.PodLabelMap{ + "foo": "bar", }, }, - expectSets: nil, - expectMap: nil, - expectErr: negtypes.ErrEPZoneMissing, - }, - } - for _, tc := range testCases { - result, err := toZoneNetworkEndpointMap(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlice), zoneGetter, podLister, "", negtypes.VmIpPortEndpointType, false, klog.TODO()) - if !errors.Is(err, tc.expectErr) { - t.Errorf("For case %q, expect %v error, but got %v.", tc.desc, tc.expectErr, err) - } - if !reflect.DeepEqual(result.NetworkEndpointSet, tc.expectSets) { - t.Errorf("For case %q, expecting endpoint set %v, but got %v.", tc.desc, tc.expectSets, result.NetworkEndpointSet) - } - if !reflect.DeepEqual(result.EndpointPodMap, tc.expectMap) { - t.Errorf("For case %q, expecting endpoint map %v, but got %v.", tc.desc, tc.expectMap, result.EndpointPodMap) - } - } -} - -func TestRetrieveExistingZoneNetworkEndpointMap(t *testing.T) { - nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) - negCloud := negtypes.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network") - negName := "test-neg-name" - irrelevantNegName := "irrelevant" - testIP1 := "1.2.3.4" - testIP2 := "1.2.3.5" - testIP3 := "1.2.3.6" - testIP4 := "1.2.3.7" - testIP5 := "1.2.3.8" - testIP6 := "1.2.3.9" - testIP7 := "1.2.3.10" - testPort := int64(80) - - endpoint1 := 
negtypes.NetworkEndpoint{IP: testIP1, Node: negtypes.TestInstance1, Port: strconv.Itoa(int(testPort))} - endpoint2 := negtypes.NetworkEndpoint{IP: testIP2, Node: negtypes.TestInstance2, Port: strconv.Itoa(int(testPort))} - endpoint3 := negtypes.NetworkEndpoint{IP: testIP3, Node: negtypes.TestInstance3, Port: strconv.Itoa(int(testPort))} - endpoint4 := negtypes.NetworkEndpoint{IP: testIP4, Node: negtypes.TestInstance4, Port: strconv.Itoa(int(testPort))} - endpoint5 := negtypes.NetworkEndpoint{IP: testIP5, Node: negtypes.TestUnreadyInstance1, Port: strconv.Itoa(int(testPort))} - endpoint6 := negtypes.NetworkEndpoint{IP: testIP6, Node: negtypes.TestUpgradeInstance1, Port: strconv.Itoa(int(testPort))} - endpoint7 := negtypes.NetworkEndpoint{IP: testIP7, Node: negtypes.TestUpgradeInstance2, Port: strconv.Itoa(int(testPort))} - - testCases := []struct { - desc string - mutate func(cloud negtypes.NetworkEndpointGroupCloud) - mode negtypes.EndpointsCalculatorMode - expect map[string]negtypes.NetworkEndpointSet - expectAnnotationMap labels.EndpointPodLabelMap - expectErr bool - }{ - { - desc: "neg does not exist", - mutate: func(cloud negtypes.NetworkEndpointGroupCloud) {}, - expectErr: true, - }, - { - desc: "neg only exists in one of the zone", - mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: testNegName, Version: meta.VersionGA}, negtypes.TestZone1, klog.TODO()) - }, - expectErr: true, - }, - { - desc: "neg only exists in one of the zone plus irrelevant negs", - mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: irrelevantNegName, Version: meta.VersionGA}, negtypes.TestZone2, klog.TODO()) - }, - expectErr: true, - }, - { - desc: "empty negs exists in all 3 zones", - mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: testNegName, Version: meta.VersionGA}, negtypes.TestZone2, klog.TODO()) - cloud.CreateNetworkEndpointGroup(&composite.NetworkEndpointGroup{Name: testNegName, Version: meta.VersionGA}, negtypes.TestZone4, klog.TODO()) - }, - expect: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet(), - negtypes.TestZone2: negtypes.NewNetworkEndpointSet(), - negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), - }, - expectAnnotationMap: labels.EndpointPodLabelMap{}, - expectErr: false, + expectErr: false, }, { - desc: "one empty and two non-empty negs", + desc: "all 3 negs with multiple endpoints, endpoint6 and endpoint7 with no pod label", mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone1, []*composite.NetworkEndpoint{ + cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone4, []*composite.NetworkEndpoint{ { - Instance: negtypes.TestInstance1, - IpAddress: testIP1, + Instance: negtypes.TestUpgradeInstance1, + IpAddress: testIP6, + Port: testPort, + }, + { + Instance: negtypes.TestUpgradeInstance2, + IpAddress: testIP7, Port: testPort, - Annotations: map[string]string{ - "foo": "bar", - }, }, }, meta.VersionGA, klog.TODO()) }, expect: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet(endpoint1), - negtypes.TestZone2: negtypes.NewNetworkEndpointSet(), - negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + endpoint1, + endpoint2, + ), + 
negtypes.TestZone2: negtypes.NewNetworkEndpointSet( + endpoint3, + endpoint4, + ), + negtypes.TestZone4: negtypes.NewNetworkEndpointSet( + endpoint6, + endpoint7, + ), }, expectAnnotationMap: labels.EndpointPodLabelMap{ endpoint1: labels.PodLabelMap{ "foo": "bar", }, + endpoint2: labels.PodLabelMap{ + "foo": "bar", + }, + endpoint3: labels.PodLabelMap{ + "foo": "bar", + }, + endpoint4: labels.PodLabelMap{ + "foo": "bar", + }, + endpoint6: nil, + endpoint7: nil, }, expectErr: false, }, { - desc: "one neg with multiple endpoints", + desc: "irrelevant neg", mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone1, []*composite.NetworkEndpoint{ + cloud.AttachNetworkEndpoints(irrelevantNegName, negtypes.TestZone2, []*composite.NetworkEndpoint{ { - Instance: negtypes.TestInstance2, - IpAddress: testIP2, + Instance: negtypes.TestInstance3, + IpAddress: testIP4, Port: testPort, - Annotations: map[string]string{ - "foo": "bar", - }, }, }, meta.VersionGA, klog.TODO()) }, @@ -1213,140 +982,14 @@ func TestRetrieveExistingZoneNetworkEndpointMap(t *testing.T) { endpoint1, endpoint2, ), - negtypes.TestZone2: negtypes.NewNetworkEndpointSet(), - negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), - }, - expectAnnotationMap: labels.EndpointPodLabelMap{ - endpoint1: labels.PodLabelMap{ - "foo": "bar", - }, - endpoint2: labels.PodLabelMap{ - "foo": "bar", - }, - }, - expectErr: false, - }, - { - desc: "2 negs with multiple endpoints", - mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone2, []*composite.NetworkEndpoint{ - { - Instance: negtypes.TestInstance3, - IpAddress: testIP3, - Port: testPort, - Annotations: map[string]string{ - "foo": "bar", - }, - }, - { - Instance: negtypes.TestInstance4, - IpAddress: testIP4, - Port: testPort, - Annotations: map[string]string{ - "foo": "bar", - }, - }, - }, meta.VersionGA, klog.TODO()) - }, - expect: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet( - endpoint1, - endpoint2, - ), - negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - endpoint3, - endpoint4, - ), - negtypes.TestZone4: negtypes.NewNetworkEndpointSet(), - }, - expectAnnotationMap: labels.EndpointPodLabelMap{ - endpoint1: labels.PodLabelMap{ - "foo": "bar", - }, - endpoint2: labels.PodLabelMap{ - "foo": "bar", - }, - endpoint3: labels.PodLabelMap{ - "foo": "bar", - }, - endpoint4: labels.PodLabelMap{ - "foo": "bar", - }, - }, - expectErr: false, - }, - { - desc: "all 3 negs with multiple endpoints, endpoint6 and endpoint7 with no pod label", - mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.AttachNetworkEndpoints(testNegName, negtypes.TestZone4, []*composite.NetworkEndpoint{ - { - Instance: negtypes.TestUpgradeInstance1, - IpAddress: testIP6, - Port: testPort, - }, - { - Instance: negtypes.TestUpgradeInstance2, - IpAddress: testIP7, - Port: testPort, - }, - }, meta.VersionGA, klog.TODO()) - }, - expect: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet( - endpoint1, - endpoint2, - ), - negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - endpoint3, - endpoint4, - ), - negtypes.TestZone4: negtypes.NewNetworkEndpointSet( - endpoint6, - endpoint7, - ), - }, - expectAnnotationMap: labels.EndpointPodLabelMap{ - endpoint1: labels.PodLabelMap{ - "foo": "bar", - }, - endpoint2: labels.PodLabelMap{ - "foo": "bar", - }, - endpoint3: labels.PodLabelMap{ - "foo": "bar", - }, - 
endpoint4: labels.PodLabelMap{ - "foo": "bar", - }, - endpoint6: nil, - endpoint7: nil, - }, - expectErr: false, - }, - { - desc: "irrelevant neg", - mutate: func(cloud negtypes.NetworkEndpointGroupCloud) { - cloud.AttachNetworkEndpoints(irrelevantNegName, negtypes.TestZone2, []*composite.NetworkEndpoint{ - { - Instance: negtypes.TestInstance3, - IpAddress: testIP4, - Port: testPort, - }, - }, meta.VersionGA, klog.TODO()) - }, - expect: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet( - endpoint1, - endpoint2, - ), - negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - endpoint3, - endpoint4, - ), - negtypes.TestZone4: negtypes.NewNetworkEndpointSet( - endpoint6, - endpoint7, - ), + negtypes.TestZone2: negtypes.NewNetworkEndpointSet( + endpoint3, + endpoint4, + ), + negtypes.TestZone4: negtypes.NewNetworkEndpointSet( + endpoint6, + endpoint7, + ), }, expectAnnotationMap: labels.EndpointPodLabelMap{ endpoint1: labels.PodLabelMap{ @@ -1921,8 +1564,8 @@ func TestToZoneNetworkEndpointMapDegradedMode(t *testing.T) { t.Parallel() nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) testContext := negtypes.NewTestContext() podLister := testContext.PodInformer.GetIndexer() addPodsToLister(podLister, getDefaultEndpointSlices()) @@ -1994,122 +1637,803 @@ func TestToZoneNetworkEndpointMapDegradedMode(t *testing.T) { networkEndpointFromEncodedEndpoint("10.100.1.3||instance1||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod5"}, networkEndpointFromEncodedEndpoint("10.100.1.4||instance1||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod6"}, }, - networkEndpointType: negtypes.VmIpPortEndpointType, - }, - { - desc: "named target port", - testEndpointSlices: getDefaultEndpointSlices(), - portName: testNamedPort, + networkEndpointType: negtypes.VmIpPortEndpointType, + }, + { + desc: "named target port", + testEndpointSlices: getDefaultEndpointSlices(), + portName: testNamedPort, + expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + networkEndpointFromEncodedEndpoint("10.100.2.2||instance2||81")), + negtypes.TestZone2: negtypes.NewNetworkEndpointSet( + networkEndpointFromEncodedEndpoint("10.100.4.1||instance4||81"), + networkEndpointFromEncodedEndpoint("10.100.3.2||instance3||8081"), + networkEndpointFromEncodedEndpoint("10.100.4.2||instance4||8081"), + networkEndpointFromEncodedEndpoint("10.100.4.3||instance4||81"), + networkEndpointFromEncodedEndpoint("10.100.4.4||instance4||8081")), + }, + expectedPodMap: negtypes.EndpointPodMap{ + networkEndpointFromEncodedEndpoint("10.100.2.2||instance2||81"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod7"}, + networkEndpointFromEncodedEndpoint("10.100.4.1||instance4||81"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod8"}, + networkEndpointFromEncodedEndpoint("10.100.4.3||instance4||81"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod9"}, + networkEndpointFromEncodedEndpoint("10.100.3.2||instance3||8081"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod10"}, + networkEndpointFromEncodedEndpoint("10.100.4.2||instance4||8081"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod11"}, + 
networkEndpointFromEncodedEndpoint("10.100.4.4||instance4||8081"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod12"}, + }, + networkEndpointType: negtypes.VmIpPortEndpointType, + }, + { + desc: "Non-GCP network endpoints", + testEndpointSlices: getDefaultEndpointSlices(), + portName: testEmptyNamedPort, + expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + networkEndpointFromEncodedEndpoint("10.100.1.1||||80"), + networkEndpointFromEncodedEndpoint("10.100.1.2||||80"), + networkEndpointFromEncodedEndpoint("10.100.2.1||||80"), + networkEndpointFromEncodedEndpoint("10.100.1.3||||80"), + networkEndpointFromEncodedEndpoint("10.100.1.4||||80")), + negtypes.TestZone2: negtypes.NewNetworkEndpointSet( + networkEndpointFromEncodedEndpoint("10.100.3.1||||80")), + }, + expectedPodMap: negtypes.EndpointPodMap{ + networkEndpointFromEncodedEndpoint("10.100.1.1||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, + networkEndpointFromEncodedEndpoint("10.100.1.2||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + networkEndpointFromEncodedEndpoint("10.100.2.1||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod3"}, + networkEndpointFromEncodedEndpoint("10.100.3.1||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod4"}, + networkEndpointFromEncodedEndpoint("10.100.1.3||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod5"}, + networkEndpointFromEncodedEndpoint("10.100.1.4||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod6"}, + }, + networkEndpointType: negtypes.NonGCPPrivateEndpointType, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + result := toZoneNetworkEndpointMapDegradedMode(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices), fakeZoneGetter, podLister, nodeLister, serviceLister, tc.portName, tc.networkEndpointType, false, false, klog.TODO()) + if !reflect.DeepEqual(result.NetworkEndpointSet, tc.expectedEndpointMap) { + t.Errorf("degraded mode endpoint set is not calculated correctly:\ngot %+v,\n expected %+v", result.NetworkEndpointSet, tc.expectedEndpointMap) + } + if !reflect.DeepEqual(result.EndpointPodMap, tc.expectedPodMap) { + t.Errorf("degraded mode endpoint map is not calculated correctly:\ngot %+v,\n expected %+v", result.EndpointPodMap, tc.expectedPodMap) + } + }) + } +} + +// TestValidateEndpointFields validates if toZoneNetworkEndpointMap +// and toZoneNetworkEndpointMapDegradedMode return the correct endpoints and +// correct type of error with the supplied invalid endpoint information. +func TestValidateEndpointFields(t *testing.T) { + t.Parallel() + + emptyNamedPort := "" + emptyNodeName := "" + port80 := int32(80) + protocolTCP := v1.ProtocolTCP + instance1 := negtypes.TestInstance1 + instance2 := negtypes.TestInstance2 + instance3 := negtypes.TestInstance3 + instance4 := negtypes.TestInstance4 + emptyZoneInstance := negtypes.TestEmptyZoneInstance + emptyZonePod := "empty-zone-pod" // This should map to emptyZoneInstance. 
+ notExistInstance := negtypes.TestNotExistInstance + + testContext := negtypes.NewTestContext() + podLister := testContext.PodInformer.GetIndexer() + addPodsToLister(podLister, getDefaultEndpointSlices()) + nodeLister := testContext.NodeInformer.GetIndexer() + zonegetter.PopulateFakeNodeInformer(testContext.NodeInformer, false) + fakeZoneGetter := zonegetter.NewFakeZoneGetter(testContext.NodeInformer, defaultTestSubnetURL, false) + + // Add the pod that corresponds to empty zone instance. + podLister.Add(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: emptyZonePod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, + }, + Spec: v1.PodSpec{ + NodeName: emptyZoneInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.100.5.1", + PodIPs: []v1.PodIP{ + {IP: "10.100.5.1"}, + }, + }, + }) + + testLabels := map[string]string{ + "run": "foo", + } + serviceLister := testContext.ServiceInformer.GetIndexer() + serviceLister.Add(&v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testServiceNamespace, + Name: testServiceName, + }, + Spec: v1.ServiceSpec{ + Selector: testLabels, + }, + }) + + // endpointMap and podMap contain all correct endpoints. + endpointMap := map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}, + ), + } + podMap := negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + } + + // endpointMapExcluded and podMapExcluded only includes valid endpoints. + // In normal mode, we exclude the specific endpoint for cases where an endpoint's pod information is invalid, including: + // 1. endpoint has an empty targetRef + // 2. endpoint's corresponding pod does not exist + // 3. endpoint corresponds to an object that fails pod type assertion + // + // In degraded mode, we should exclude the invalid endpoint for non-coverable cases(pod invalid or empty zone). + // We always inject to first endpoint, so the result only contain the second endpoint. 
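+ // For example, the missing-pod and non-existent-pod cases below expect
+ // endpointMapExcluded/podMapExcluded (only the 10.100.1.2 endpoint on instance1
+ // survives), while the missing/empty-nodeName cases still expect the full
+ // endpointMap in degraded mode because the node name is recovered from the pod.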
+ endpointMapExcluded := map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}, + ), + } + podMapExcluded := negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + } + + testCases := []struct { + desc string + testEndpointSlices []*discovery.EndpointSlice + expectedEndpointMap map[string]negtypes.NetworkEndpointSet + expectedPodMap negtypes.EndpointPodMap + expectErr error + expectedEndpointMapDegradedMode map[string]negtypes.NetworkEndpointSet + expectedPodMapDegradedMode negtypes.EndpointPodMap + }{ + { + desc: "endpoints with no errors, both calculations should have all endpoints", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod1", + }, + }, + { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: endpointMap, + expectedPodMap: podMap, + expectErr: nil, + expectedEndpointMapDegradedMode: endpointMap, + expectedPodMapDegradedMode: podMap, + }, + { + desc: "include one endpoint that has missing nodeName, error will be raised in normal mode, nodeName should be filled in degraded mode", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: nil, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod1", + }, + }, + { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: nil, + expectedPodMap: nil, + expectErr: negtypes.ErrEPNodeMissing, + expectedEndpointMapDegradedMode: endpointMap, + expectedPodMapDegradedMode: podMap, + }, + { + desc: "include one endpoint that has empty nodeName, error will be raised in normal mode, nodeName should be filled in degraded mode", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: &emptyNodeName, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod1", + }, + }, + { + Addresses: []string{"10.100.1.2"}, + 
NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: nil, + expectedPodMap: nil, + expectErr: negtypes.ErrEPNodeMissing, + expectedEndpointMapDegradedMode: endpointMap, + expectedPodMapDegradedMode: podMap, + }, + { + desc: "include one endpoint that has missing pod, endpoint should be excluded in both calculations", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, + TargetRef: nil, + }, + { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: endpointMapExcluded, + expectedPodMap: podMapExcluded, + expectErr: nil, + expectedEndpointMapDegradedMode: endpointMapExcluded, + expectedPodMapDegradedMode: podMapExcluded, + }, + { + desc: "include one endpoint that does not correspond to an existing pod, endpoint should be excluded in both calculations", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "foo", // this is a non-existing pod + }, + }, + { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: endpointMapExcluded, + expectedPodMap: podMapExcluded, + expectErr: nil, + expectedEndpointMapDegradedMode: endpointMapExcluded, + expectedPodMapDegradedMode: podMapExcluded, + }, + { + desc: "include one endpoint that does not correspond to an existing node, error will be raised in normal mode, instance name will be deduced from pod and corrected in degraded mode", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: ¬ExistInstance, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod1", + }, + }, + { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: nil, + expectedPodMap: nil, + expectErr: negtypes.ErrEPNodeNotFound, + 
expectedEndpointMapDegradedMode: endpointMap, + expectedPodMapDegradedMode: podMap, + }, + { + desc: "include one endpoint that corresponds to an empty zone, error will be raised in normal mode, endpoint should be excluded in degraded mode", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: &emptyZoneInstance, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: emptyZonePod, + }, + }, + { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: nil, + expectedPodMap: nil, + expectErr: negtypes.ErrEPZoneMissing, + expectedEndpointMapDegradedMode: endpointMapExcluded, + expectedPodMapDegradedMode: podMapExcluded, + }, + { + // Endpoint IP and pod IP check is only performed during degraded + // mode, so in normal calculation, endpoints with not matching will + // be included. + desc: "single stack ipv4 endpoints, contains one endpoint with IPv4 address not matching to its pod, normal mode will include the invalid endpoint, endpoint should be removed in degraded mode", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod1", + }, + }, + { + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod2", + }, + }, + { + Addresses: []string{"10.100.2.2"}, // the IPv4 address of this pod is 10.100.2.1 + NodeName: &instance2, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod3", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, + expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.2.2", Node: instance2, Port: "80"}, + ), + }, + expectedPodMap: negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + negtypes.NetworkEndpoint{IP: "10.100.2.2", Node: instance2, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod3"}, + }, + expectErr: nil, + expectedEndpointMapDegradedMode: endpointMap, + expectedPodMapDegradedMode: podMap, + }, + { + // Endpoint IP and pod IP check is only 
performed during degraded + // mode, so in normal calculation, endpoints with not matching will + // be included. + desc: "dual stack endpoints, contains one endpoint with IPv6 address not matching to its pod, normal mode will include the invalid endpoint, endpoint should be removed in degraded mode", + testEndpointSlices: []*discovery.EndpointSlice{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv4", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"10.100.3.2"}, + NodeName: &instance3, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod10", + }, + }, + { + Addresses: []string{"10.100.4.2"}, + NodeName: &instance4, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod11", + }, + }, + { + Addresses: []string{"10.100.4.4"}, + NodeName: &instance4, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod12", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + }, + }, + AddressType: "IPv6", + Endpoints: []discovery.Endpoint{ + { + Addresses: []string{"a:b::1"}, + NodeName: &instance3, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod10", + }, + }, + { + Addresses: []string{"a:b::2"}, + NodeName: &instance4, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod11", + }, + }, + { + Addresses: []string{"a:b::4"}, // the IPv6 address of this pod is a:b::3 + NodeName: &instance4, + TargetRef: &v1.ObjectReference{ + Namespace: testServiceNamespace, + Name: "pod12", + }, + }, + }, + Ports: []discovery.EndpointPort{ + { + Name: &emptyNamedPort, + Port: &port80, + Protocol: &protocolTCP, + }, + }, + }, + }, expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet( - networkEndpointFromEncodedEndpoint("10.100.2.2||instance2||81")), negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - networkEndpointFromEncodedEndpoint("10.100.4.1||instance4||81"), - networkEndpointFromEncodedEndpoint("10.100.3.2||instance3||8081"), - networkEndpointFromEncodedEndpoint("10.100.4.2||instance4||8081"), - networkEndpointFromEncodedEndpoint("10.100.4.3||instance4||81"), - networkEndpointFromEncodedEndpoint("10.100.4.4||instance4||8081")), + negtypes.NetworkEndpoint{IP: "10.100.3.2", IPv6: "a:b::1", Node: instance3, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.4.2", IPv6: "a:b::2", Node: instance4, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.4.4", IPv6: "a:b::4", Node: instance4, Port: "80"}, + ), }, expectedPodMap: negtypes.EndpointPodMap{ - networkEndpointFromEncodedEndpoint("10.100.2.2||instance2||81"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod7"}, - networkEndpointFromEncodedEndpoint("10.100.4.1||instance4||81"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod8"}, - networkEndpointFromEncodedEndpoint("10.100.4.3||instance4||81"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod9"}, - networkEndpointFromEncodedEndpoint("10.100.3.2||instance3||8081"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod10"}, - 
networkEndpointFromEncodedEndpoint("10.100.4.2||instance4||8081"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod11"}, - networkEndpointFromEncodedEndpoint("10.100.4.4||instance4||8081"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod12"}, + negtypes.NetworkEndpoint{IP: "10.100.3.2", IPv6: "a:b::1", Node: instance3, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod10"}, + negtypes.NetworkEndpoint{IP: "10.100.4.2", IPv6: "a:b::2", Node: instance4, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod11"}, + negtypes.NetworkEndpoint{IP: "10.100.4.4", IPv6: "a:b::4", Node: instance4, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod12"}, }, - networkEndpointType: negtypes.VmIpPortEndpointType, - }, - { - desc: "Non-GCP network endpoints", - testEndpointSlices: getDefaultEndpointSlices(), - portName: testEmptyNamedPort, - expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone1: negtypes.NewNetworkEndpointSet( - networkEndpointFromEncodedEndpoint("10.100.1.1||||80"), - networkEndpointFromEncodedEndpoint("10.100.1.2||||80"), - networkEndpointFromEncodedEndpoint("10.100.2.1||||80"), - networkEndpointFromEncodedEndpoint("10.100.1.3||||80"), - networkEndpointFromEncodedEndpoint("10.100.1.4||||80")), + expectErr: nil, + expectedEndpointMapDegradedMode: map[string]negtypes.NetworkEndpointSet{ negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - networkEndpointFromEncodedEndpoint("10.100.3.1||||80")), + negtypes.NetworkEndpoint{IP: "10.100.3.2", IPv6: "a:b::1", Node: instance3, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.4.2", IPv6: "a:b::2", Node: instance4, Port: "80"}, + ), }, - expectedPodMap: negtypes.EndpointPodMap{ - networkEndpointFromEncodedEndpoint("10.100.1.1||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, - networkEndpointFromEncodedEndpoint("10.100.1.2||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, - networkEndpointFromEncodedEndpoint("10.100.2.1||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod3"}, - networkEndpointFromEncodedEndpoint("10.100.3.1||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod4"}, - networkEndpointFromEncodedEndpoint("10.100.1.3||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod5"}, - networkEndpointFromEncodedEndpoint("10.100.1.4||||80"): types.NamespacedName{Namespace: testServiceNamespace, Name: "pod6"}, + expectedPodMapDegradedMode: negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.3.2", IPv6: "a:b::1", Node: instance3, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod10"}, + negtypes.NetworkEndpoint{IP: "10.100.4.2", IPv6: "a:b::2", Node: instance4, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod11"}, }, - networkEndpointType: negtypes.NonGCPPrivateEndpointType, }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - result := toZoneNetworkEndpointMapDegradedMode(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices), fakeZoneGetter, podLister, nodeLister, serviceLister, tc.portName, tc.networkEndpointType, false, klog.TODO()) - if !reflect.DeepEqual(result.NetworkEndpointSet, tc.expectedEndpointMap) { - t.Errorf("degraded mode endpoint set is not calculated correctly:\ngot %+v,\n expected %+v", result.NetworkEndpointSet, tc.expectedEndpointMap) - } - if 
!reflect.DeepEqual(result.EndpointPodMap, tc.expectedPodMap) { - t.Errorf("degraded mode endpoint map is not calculated correctly:\ngot %+v,\n expected %+v", result.EndpointPodMap, tc.expectedPodMap) + for _, enableMultiSubnetCluster := range []bool{true, false} { + result, err := toZoneNetworkEndpointMap(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices), fakeZoneGetter, podLister, emptyNamedPort, negtypes.VmIpPortEndpointType, true, enableMultiSubnetCluster, klog.TODO()) + if !errors.Is(err, tc.expectErr) { + t.Errorf("normal mode with enableMultiSubnetCluster = %v, calculation got error %v, expected %v", enableMultiSubnetCluster, err, tc.expectErr) + } + if !reflect.DeepEqual(result.NetworkEndpointSet, tc.expectedEndpointMap) { + t.Errorf("normal mode with enableMultiSubnetCluster = %v, endpoint set is not calculated correctly:\ngot %+v,\n expected %+v", enableMultiSubnetCluster, result.NetworkEndpointSet, tc.expectedEndpointMap) + } + if !reflect.DeepEqual(result.EndpointPodMap, tc.expectedPodMap) { + t.Errorf("normal mode with enableMultiSubnetCluster = %v, endpoint map is not calculated correctly:\ngot %+v,\n expected %+v", enableMultiSubnetCluster, result.EndpointPodMap, tc.expectedPodMap) + } + + resultDegradedMode := toZoneNetworkEndpointMapDegradedMode(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices), fakeZoneGetter, podLister, nodeLister, serviceLister, emptyNamedPort, negtypes.VmIpPortEndpointType, true, enableMultiSubnetCluster, klog.TODO()) + if !reflect.DeepEqual(resultDegradedMode.NetworkEndpointSet, tc.expectedEndpointMapDegradedMode) { + t.Errorf("degraded mode with enableMultiSubnetCluster = %v, endpoint set is not calculated correctly:\ngot %+v,\n expected %+v", enableMultiSubnetCluster, resultDegradedMode.NetworkEndpointSet, tc.expectedEndpointMapDegradedMode) + } + if !reflect.DeepEqual(resultDegradedMode.EndpointPodMap, tc.expectedPodMapDegradedMode) { + t.Errorf("degraded mode with enableMultiSubnetCluster = %v, endpoint map is not calculated correctly:\ngot %+v,\n expected %+v", enableMultiSubnetCluster, resultDegradedMode.EndpointPodMap, tc.expectedPodMapDegradedMode) + } } }) } } -func TestDegradedModeValidateEndpointInfo(t *testing.T) { +func TestValidateEndpointFieldsMultipleSubnets(t *testing.T) { t.Parallel() + emptyNamedPort := "" - emptyNodeName := "" port80 := int32(80) protocolTCP := v1.ProtocolTCP instance1 := negtypes.TestInstance1 - instance2 := negtypes.TestInstance2 - instance3 := negtypes.TestInstance3 - instance4 := negtypes.TestInstance4 - nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer) + + defaultSubnetLabelInstance := negtypes.TestDefaultSubnetLabelInstance + defaultSubnetLabelPod := negtypes.TestDefaultSubnetLabelPod + emptySubnetLabelInstance := negtypes.TestEmptySubnetLabelInstance + emptySubnetLabelPod := negtypes.TestEmptySubnetLabelPod + noPodCIDRInstance := negtypes.TestNoPodCIDRInstance + noPodCIDRPod := negtypes.TestNoPodCIDRPod + nonDefaultSubnetInstance := negtypes.TestNonDefaultSubnetInstance + nonDefaultSubnetPod := negtypes.TestNonDefaultSubnetPod + + testContext := negtypes.NewTestContext() podLister := testContext.PodInformer.GetIndexer() addPodsToLister(podLister, getDefaultEndpointSlices()) - nodeLister := testContext.NodeInformer.GetIndexer() - nodeLister.Add(&v1.Node{ + zonegetter.PopulateFakeNodeInformer(testContext.NodeInformer, true) + fakeZoneGetter := 
zonegetter.NewFakeZoneGetter(testContext.NodeInformer, defaultTestSubnetURL, true) + + // Add defaultSubnetLabelPod that corresponds to defaultSubnetLabelInstance. + podLister.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: instance1, + Namespace: testServiceNamespace, + Name: defaultSubnetLabelPod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, }, - Spec: v1.NodeSpec{ - PodCIDR: "10.100.1.0/24", - PodCIDRs: []string{"a:b::/48", "10.100.1.0/24"}, + Spec: v1.PodSpec{ + NodeName: defaultSubnetLabelInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.101.1.1", + PodIPs: []v1.PodIP{ + {IP: "10.101.1.1"}, + }, }, }) - nodeLister.Add(&v1.Node{ + + // Add emptySubnetLabelPod that corresponds to emptySubnetLabelInstance. + podLister.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: instance2, + Namespace: testServiceNamespace, + Name: emptySubnetLabelPod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, }, - Spec: v1.NodeSpec{ - PodCIDR: "10.100.2.0/24", - PodCIDRs: []string{"a:b::/48", "10.100.2.0/24"}, + Spec: v1.PodSpec{ + NodeName: emptySubnetLabelInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.101.2.1", + PodIPs: []v1.PodIP{ + {IP: "10.101.2.1"}, + }, }, }) - nodeLister.Add(&v1.Node{ + + // Add noPodCIDRPod that corresponds to noPodCIDRInstance. + podLister.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: instance3, + Namespace: testServiceNamespace, + Name: noPodCIDRPod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, }, - Spec: v1.NodeSpec{ - PodCIDR: "10.100.3.0/24", - PodCIDRs: []string{"a:b::/48", "10.100.3.0/24"}, + Spec: v1.PodSpec{ + NodeName: noPodCIDRInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.101.3.1", + PodIPs: []v1.PodIP{ + {IP: "10.101.3.1"}, + }, }, }) - nodeLister.Add(&v1.Node{ + + // Add nonDefaultSubnetPod that corresponds to nonDefaultSubnetInstance. 
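+ // nonDefaultSubnetInstance presumably carries a subnet label other than
+ // defaultTestSubnetURL, so with multi-subnet support enabled its endpoint is
+ // expected to be skipped (ErrNodeNotInDefaultSubnet) and counted under
+ // NodeInNonDefaultSubnet rather than failing the sync, mirroring the change
+ // to toZoneNetworkEndpointMap above.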
+ podLister.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: instance4, + Namespace: testServiceNamespace, + Name: nonDefaultSubnetPod, + Labels: map[string]string{ + discovery.LabelServiceName: testServiceName, + discovery.LabelManagedBy: managedByEPSControllerValue, + }, }, - Spec: v1.NodeSpec{ - PodCIDR: "10.100.4.0/24", - PodCIDRs: []string{"a:b::/48", "10.100.4.0/24"}, + Spec: v1.PodSpec{ + NodeName: nonDefaultSubnetInstance, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.200.1.1", + PodIPs: []v1.PodIP{ + {IP: "10.200.1.1"}, + }, }, }) @@ -2139,29 +2463,30 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { } testCases := []struct { - desc string - testEndpointSlices []*discovery.EndpointSlice - endpointType negtypes.NetworkEndpointType - expectedEndpointMap map[string]negtypes.NetworkEndpointSet - expectedPodMap negtypes.EndpointPodMap + desc string + testEndpointSlices []*discovery.EndpointSlice + expectedEndpointMap map[string]negtypes.NetworkEndpointSet + expectedPodMap negtypes.EndpointPodMap + expectErr error + expectedEndpointMapDegradedMode map[string]negtypes.NetworkEndpointSet + expectedPodMapDegradedMode negtypes.EndpointPodMap }{ { - desc: "contains one endpoint without nodeName, nodeName should be filled", + desc: "include one endpoint that corresponds to a node without subnet label, endpoint should be included in both calculations", testEndpointSlices: []*discovery.EndpointSlice{ { ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", + Name: testServiceName, Namespace: testServiceNamespace, Labels: map[string]string{ discovery.LabelServiceName: testServiceName, - discovery.LabelManagedBy: managedByEPSControllerValue, }, }, AddressType: "IPv4", Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.100.1.1"}, - NodeName: nil, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, Name: "pod1", @@ -2175,48 +2500,12 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { Name: "pod2", }, }, - }, - Ports: []discovery.EndpointPort{ - { - Name: &emptyNamedPort, - Port: &port80, - Protocol: &protocolTCP, - }, - }, - }, - }, - endpointType: negtypes.VmIpPortEndpointType, - expectedEndpointMap: endpointMap, - expectedPodMap: podMap, - }, - { - desc: "contains one endpoint with empty nodeName, nodeName should be filled", - testEndpointSlices: []*discovery.EndpointSlice{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", - Namespace: testServiceNamespace, - Labels: map[string]string{ - discovery.LabelServiceName: testServiceName, - discovery.LabelManagedBy: managedByEPSControllerValue, - }, - }, - AddressType: "IPv4", - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.100.1.1"}, - NodeName: &emptyNodeName, - TargetRef: &v1.ObjectReference{ - Namespace: testServiceNamespace, - Name: "pod1", - }, - }, { - Addresses: []string{"10.100.1.2"}, - NodeName: &instance1, + Addresses: []string{"10.101.1.1"}, + NodeName: &defaultSubnetLabelInstance, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: "pod2", + Name: defaultSubnetLabelPod, }, }, }, @@ -2229,20 +2518,45 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { }, }, }, - endpointType: negtypes.VmIpPortEndpointType, - expectedEndpointMap: endpointMap, - expectedPodMap: podMap, + expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}, + 
negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}, + ), + negtypes.TestZone5: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.101.1.1", Node: defaultSubnetLabelInstance, Port: "80"}, + ), + }, + expectedPodMap: negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + negtypes.NetworkEndpoint{IP: "10.101.1.1", Node: defaultSubnetLabelInstance, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: defaultSubnetLabelPod}, + }, + expectErr: nil, + expectedEndpointMapDegradedMode: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}, + ), + negtypes.TestZone5: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.101.1.1", Node: defaultSubnetLabelInstance, Port: "80"}, + ), + }, + expectedPodMapDegradedMode: negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + negtypes.NetworkEndpoint{IP: "10.101.1.1", Node: defaultSubnetLabelInstance, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: defaultSubnetLabelPod}, + }, }, { - desc: "single stack ipv4 endpoints, contains one endpoint with IPv4 address not matching to its pod, endpoint should be removed", + desc: "include one endpoint that corresponds to a node with empty string as subnet label, endpoint should be included in both calculations", testEndpointSlices: []*discovery.EndpointSlice{ { ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-1", + Name: testServiceName, Namespace: testServiceNamespace, Labels: map[string]string{ discovery.LabelServiceName: testServiceName, - discovery.LabelManagedBy: managedByEPSControllerValue, }, }, AddressType: "IPv4", @@ -2264,11 +2578,11 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { }, }, { - Addresses: []string{"10.100.2.2"}, // the IPv4 address of this pod is 10.100.2.1 - NodeName: &instance2, + Addresses: []string{"10.101.2.1"}, + NodeName: &emptySubnetLabelInstance, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: "pod3", + Name: negtypes.TestEmptySubnetLabelPod, }, }, }, @@ -2281,16 +2595,42 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { }, }, }, - endpointType: negtypes.VmIpPortEndpointType, - expectedEndpointMap: endpointMap, - expectedPodMap: podMap, + expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}, + ), + negtypes.TestZone6: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.101.2.1", Node: emptySubnetLabelInstance, Port: "80"}, + ), + }, + expectedPodMap: negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, + 
negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + negtypes.NetworkEndpoint{IP: "10.101.2.1", Node: emptySubnetLabelInstance, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: emptySubnetLabelPod}, + }, + expectErr: nil, + expectedEndpointMapDegradedMode: map[string]negtypes.NetworkEndpointSet{ + negtypes.TestZone1: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}, + ), + negtypes.TestZone6: negtypes.NewNetworkEndpointSet( + negtypes.NetworkEndpoint{IP: "10.101.2.1", Node: emptySubnetLabelInstance, Port: "80"}, + ), + }, + expectedPodMapDegradedMode: negtypes.EndpointPodMap{ + negtypes.NetworkEndpoint{IP: "10.100.1.1", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod1"}, + negtypes.NetworkEndpoint{IP: "10.100.1.2", Node: instance1, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod2"}, + negtypes.NetworkEndpoint{IP: "10.101.2.1", Node: emptySubnetLabelInstance, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: emptySubnetLabelPod}, + }, }, { - desc: "dual stack endpoints, contains one endpoint with IPv6 address not matching to its pod, endpoint should be removed", + desc: "include one endpoint that corresponds to a node without PodCIDR, endpoint should be excluded in both calculations", testEndpointSlices: []*discovery.EndpointSlice{ { ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-3", + Name: testServiceName, Namespace: testServiceNamespace, Labels: map[string]string{ discovery.LabelServiceName: testServiceName, @@ -2299,27 +2639,27 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { AddressType: "IPv4", Endpoints: []discovery.Endpoint{ { - Addresses: []string{"10.100.3.2"}, - NodeName: &instance3, + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: "pod10", + Name: "pod1", }, }, { - Addresses: []string{"10.100.4.2"}, - NodeName: &instance4, + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: "pod11", + Name: "pod2", }, }, { - Addresses: []string{"10.100.4.4"}, - NodeName: &instance4, + Addresses: []string{"10.101.3.1"}, + NodeName: &noPodCIDRInstance, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: "pod12", + Name: noPodCIDRPod, }, }, }, @@ -2331,38 +2671,48 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { }, }, }, + }, + expectedEndpointMap: endpointMap, + expectedPodMap: podMap, + expectErr: nil, + expectedEndpointMapDegradedMode: endpointMap, + expectedPodMapDegradedMode: podMap, + }, + { + desc: "include one endpoint that corresponds to a non-default subnet node, endpoint should be excluded in both calculations", + testEndpointSlices: []*discovery.EndpointSlice{ { ObjectMeta: metav1.ObjectMeta{ - Name: testServiceName + "-4", + Name: testServiceName, Namespace: testServiceNamespace, Labels: map[string]string{ discovery.LabelServiceName: testServiceName, }, }, - AddressType: "IPv6", + AddressType: "IPv4", Endpoints: []discovery.Endpoint{ { - Addresses: []string{"a:b::1"}, - NodeName: &instance3, + Addresses: []string{"10.100.1.1"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: 
"pod10", + Name: "pod1", }, }, { - Addresses: []string{"a:b::2"}, - NodeName: &instance4, + Addresses: []string{"10.100.1.2"}, + NodeName: &instance1, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: "pod11", + Name: "pod2", }, }, { - Addresses: []string{"a:b::4"}, // the IPv6 address of this pod is a:b::3 - NodeName: &instance4, + Addresses: []string{"10.200.1.1"}, + NodeName: &nonDefaultSubnetInstance, TargetRef: &v1.ObjectReference{ Namespace: testServiceNamespace, - Name: "pod12", + Name: nonDefaultSubnetPod, }, }, }, @@ -2375,27 +2725,32 @@ func TestDegradedModeValidateEndpointInfo(t *testing.T) { }, }, }, - endpointType: negtypes.VmIpPortEndpointType, - expectedEndpointMap: map[string]negtypes.NetworkEndpointSet{ - negtypes.TestZone2: negtypes.NewNetworkEndpointSet( - negtypes.NetworkEndpoint{IP: "10.100.3.2", IPv6: "a:b::1", Node: instance3, Port: "80"}, - negtypes.NetworkEndpoint{IP: "10.100.4.2", IPv6: "a:b::2", Node: instance4, Port: "80"}, - ), - }, - expectedPodMap: negtypes.EndpointPodMap{ - negtypes.NetworkEndpoint{IP: "10.100.3.2", IPv6: "a:b::1", Node: instance3, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod10"}, - negtypes.NetworkEndpoint{IP: "10.100.4.2", IPv6: "a:b::2", Node: instance4, Port: "80"}: types.NamespacedName{Namespace: testServiceNamespace, Name: "pod11"}, - }, + expectedEndpointMap: endpointMap, + expectedPodMap: podMap, + expectErr: nil, + expectedEndpointMapDegradedMode: endpointMap, + expectedPodMapDegradedMode: podMap, }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - result := toZoneNetworkEndpointMapDegradedMode(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices), fakeZoneGetter, podLister, nodeLister, serviceLister, emptyNamedPort, tc.endpointType, true, klog.TODO()) + result, err := toZoneNetworkEndpointMap(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices), fakeZoneGetter, podLister, emptyNamedPort, negtypes.VmIpPortEndpointType, true, true, klog.TODO()) + if !errors.Is(err, tc.expectErr) { + t.Errorf("With multi-subnet cluste enabled, normal mode calculation got error %v, expected %v", err, tc.expectErr) + } if !reflect.DeepEqual(result.NetworkEndpointSet, tc.expectedEndpointMap) { - t.Errorf("degraded mode endpoint set is not calculated correctly:\ngot %+v,\n expected %+v", result.NetworkEndpointSet, tc.expectedEndpointMap) + t.Errorf("With multi-subnet cluste enabled, normal mode endpoint set is not calculated correctly:\ngot %+v,\n expected %+v", result.NetworkEndpointSet, tc.expectedEndpointMapDegradedMode) } if !reflect.DeepEqual(result.EndpointPodMap, tc.expectedPodMap) { - t.Errorf("degraded mode endpoint map is not calculated correctly:\ngot %+v,\n expected %+v", result.EndpointPodMap, tc.expectedPodMap) + t.Errorf("With multi-subnet cluste enabled, normal mode endpoint map is not calculated correctly:\ngot %+v,\n expected %+v", result.EndpointPodMap, tc.expectedPodMapDegradedMode) + } + + resultDegradedMode := toZoneNetworkEndpointMapDegradedMode(negtypes.EndpointsDataFromEndpointSlices(tc.testEndpointSlices), fakeZoneGetter, podLister, nodeLister, serviceLister, emptyNamedPort, negtypes.VmIpPortEndpointType, true, true, klog.TODO()) + if !reflect.DeepEqual(resultDegradedMode.NetworkEndpointSet, tc.expectedEndpointMapDegradedMode) { + t.Errorf("With multi-subnet cluste enabled, degraded mode endpoint set is not calculated correctly:\ngot %+v,\n expected %+v", resultDegradedMode.NetworkEndpointSet, tc.expectedEndpointMapDegradedMode) + } 
+ if !reflect.DeepEqual(resultDegradedMode.EndpointPodMap, tc.expectedPodMapDegradedMode) { + t.Errorf("With multi-subnet cluster enabled, degraded mode endpoint map is not calculated correctly:\ngot %+v,\n expected %+v", resultDegradedMode.EndpointPodMap, tc.expectedPodMapDegradedMode) } }) } diff --git a/pkg/neg/types/fakes.go b/pkg/neg/types/fakes.go index 1755d615ca..c7e5109daf 100644 --- a/pkg/neg/types/fakes.go +++ b/pkg/neg/types/fakes.go @@ -29,20 +29,36 @@ import ( ) const ( - TestZone1 = "zone1" - TestZone2 = "zone2" - TestZone3 = "zone3" - TestZone4 = "zone4" - TestInstance1 = "instance1" - TestInstance2 = "instance2" - TestInstance3 = "instance3" - TestInstance4 = "instance4" - TestInstance5 = "instance5" - TestInstance6 = "instance6" - TestUnreadyInstance1 = "unready-instance1" - TestUnreadyInstance2 = "unready-instance2" - TestUpgradeInstance1 = "upgrade-instance1" - TestUpgradeInstance2 = "upgrade-instance2" + TestZone1 = "zone1" + TestZone2 = "zone2" + TestZone3 = "zone3" + TestZone4 = "zone4" + TestZone5 = "zone5" + TestZone6 = "zone6" + TestZone7 = "zone7" + TestZone8 = "zone8" + TestInstance1 = "instance1" + TestInstance2 = "instance2" + TestInstance3 = "instance3" + TestInstance4 = "instance4" + TestInstance5 = "instance5" + TestInstance6 = "instance6" + TestUnreadyInstance1 = "unready-instance1" + TestUnreadyInstance2 = "unready-instance2" + TestUpgradeInstance1 = "upgrade-instance1" + TestUpgradeInstance2 = "upgrade-instance2" + TestNotExistInstance = "not-exist-instance" + TestEmptyZoneInstance = "instance-empty-providerID" // This maps to the instance with an empty providerID in PopulateFakeNodeInformer + TestEmptyZonePod = "pod-empty-providerID" + + TestDefaultSubnetLabelInstance = "default-subnet-label-instance" + TestDefaultSubnetLabelPod = "default-subnet-label-pod" + TestEmptySubnetLabelInstance = "empty-subnet-label-instance" + TestEmptySubnetLabelPod = "empty-subnet-label-pod" + TestNoPodCIDRInstance = "no-podcidr-instance" + TestNoPodCIDRPod = "no-podcidr-pod" + TestNonDefaultSubnetInstance = "non-default-subnet-instance" + TestNonDefaultSubnetPod = "non-default-subnet-pod" ) type FakeNetworkEndpointGroupCloud struct { diff --git a/pkg/neg/types/interfaces.go b/pkg/neg/types/interfaces.go index 89bee0ce8e..36be81e4eb 100644 --- a/pkg/neg/types/interfaces.go +++ b/pkg/neg/types/interfaces.go @@ -85,5 +85,5 @@ type NetworkEndpointsCalculator interface { // Mode indicates the mode that the EndpointsCalculator is operating in. Mode() EndpointsCalculatorMode // ValidateEndpoints validates the NEG endpoint information is correct - ValidateEndpoints(endpointData []EndpointsData, endpointPodMap EndpointPodMap, dupCount int) error + ValidateEndpoints(endpointData []EndpointsData, endpointPodMap EndpointPodMap, endpointsExcludedInCalculation int) error } diff --git a/pkg/neg/types/states.go b/pkg/neg/types/states.go index baca1eff13..c79cd24f54 100644 --- a/pkg/neg/types/states.go +++ b/pkg/neg/types/states.go @@ -19,19 +19,20 @@ package types type State string const ( - PodInvalid = State("PodInvalid") - PodTerminal = State("PodTerminal") - PodLabelMismatch = State("PodLabelMismatch") - NodeMissing = State("NodeMissing") - NodeNotFound = State("NodeNotFound") - ZoneMissing = State("ZoneMissing") - IPInvalid = State("IPInvalid") - IPNotFromPod = State("IPNotFromPod") - IPOutOfPodCIDR = State("IPOutOfPodCIDR") - OtherError = State("OtherError") - Duplicate = State("Duplicate") - DualStackMigration = State("DualStackMigration") // Total number of endpoints which require migration.
- Total = State("Total") + PodInvalid = State("PodInvalid") + PodTerminal = State("PodTerminal") + PodLabelMismatch = State("PodLabelMismatch") + NodeMissing = State("NodeMissing") + NodeNotFound = State("NodeNotFound") + ZoneMissing = State("ZoneMissing") + IPInvalid = State("IPInvalid") + IPNotFromPod = State("IPNotFromPod") + IPOutOfPodCIDR = State("IPOutOfPodCIDR") + OtherError = State("OtherError") + Duplicate = State("Duplicate") + DualStackMigration = State("DualStackMigration") // Total number of endpoints which require migration. + NodeInNonDefaultSubnet = State("NodeInNonDefaultSubnet") + Total = State("Total") ) // StateCountMap collect the count of instances in different states diff --git a/pkg/neg/types/types_test.go b/pkg/neg/types/types_test.go index bcc0f2aa36..44e17e0d06 100644 --- a/pkg/neg/types/types_test.go +++ b/pkg/neg/types/types_test.go @@ -31,6 +31,8 @@ import ( "k8s.io/klog/v2" ) +const defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" + type negNamer struct{} func (*negNamer) NEG(namespace, name string, svcPort int32) string { @@ -751,8 +753,8 @@ func TestNodePredicateForEndpointCalculatorMode(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { predicate := NodeFilterForEndpointCalculatorMode(tc.epCalculatorMode) nodeInformer := zonegetter.FakeNodeInformer() - zonegetter.PopulateFakeNodeInformer(nodeInformer) - zoneGetter := zonegetter.NewZoneGetter(nodeInformer) + zonegetter.PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) zones, err := zoneGetter.ListZones(predicate, klog.TODO()) if err != nil { t.Errorf("Failed listing zones with predicate, err - %v", err) diff --git a/pkg/test/utils.go b/pkg/test/utils.go index 78aed3b85f..07f3d07590 100644 --- a/pkg/test/utils.go +++ b/pkg/test/utils.go @@ -238,7 +238,7 @@ func (s *FlagSaver) Reset(key flag, flagPointer *bool) { func CreateAndInsertNodes(gce *gce.Cloud, nodeNames []string, zoneName string) ([]*api_v1.Node, error) { nodes := []*api_v1.Node{} - for _, name := range nodeNames { + for i, name := range nodeNames { // Inserting the same node name twice causes an error - here we check if // the instance exists already before insertion. exists, err := GCEInstanceExists(name, gce) @@ -273,6 +273,8 @@ func CreateAndInsertNodes(gce *gce.Cloud, nodeNames []string, zoneName string) ( }, Spec: api_v1.NodeSpec{ ProviderID: fmt.Sprintf("gce://foo-project/%s/%s", zoneName, name), + PodCIDR: fmt.Sprintf("10.100.%d.0/24", i), + PodCIDRs: []string{fmt.Sprintf("10.100.%d.0/24", i)}, }, Status: api_v1.NodeStatus{ NodeInfo: api_v1.NodeSystemInfo{ diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 83163e516e..17ff0a4527 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -92,6 +92,9 @@ const ( // be removed in 1.18. LabelAlphaNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer" DualStackSubnetStackType = "IPV4_IPV6" + + // LabelNodeSubnet specifies the subnet name of this node. 
+ LabelNodeSubnet = "cloud.google.com/gke-np-subnet" ) var networkTierErrorRegexp = regexp.MustCompile(`The network tier of external IP is STANDARD|PREMIUM, that of Address must be the same.`) diff --git a/pkg/utils/zonegetter/fake.go b/pkg/utils/zonegetter/fake.go index ba60d51346..ad483e610d 100644 --- a/pkg/utils/zonegetter/fake.go +++ b/pkg/utils/zonegetter/fake.go @@ -31,21 +31,27 @@ import ( "k8s.io/ingress-gce/pkg/utils" ) +const ( + defaultTestSubnet = "default" + nonDefaultTestSubnet = "non-default" + + defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default" +) + func FakeNodeInformer() cache.SharedIndexInformer { return informerv1.NewNodeInformer(fake.NewSimpleClientset(), 1*time.Second, utils.NewNamespaceIndexer()) } // DeleteFakeNodesInZone deletes all nodes in a zone. func DeleteFakeNodesInZone(t *testing.T, zone string, zoneGetter *ZoneGetter) { - nodeIndexer := zoneGetter.nodeInformer.GetIndexer() - nodes, err := listers.NewNodeLister(nodeIndexer).List(labels.Everything()) + nodes, err := listers.NewNodeLister(zoneGetter.nodeLister).List(labels.Everything()) if err != nil { t.Errorf("Failed listing nodes in zone %q, err - %v", zone, err) } for _, node := range nodes { nodeZone, _ := getZone(node) if nodeZone == zone { - if err := nodeIndexer.Delete(node); err != nil { + if err := zoneGetter.nodeLister.Delete(node); err != nil { t.Errorf("Failed to delete node %q in zone %q, err - %v", node.Name, zone, err) } } @@ -54,13 +60,18 @@ func DeleteFakeNodesInZone(t *testing.T, zone string, zoneGetter *ZoneGetter) { // AddFakeNodes adds fake nodes to the ZoneGetter in the provided zone. func AddFakeNodes(zoneGetter *ZoneGetter, newZone string, instances ...string) error { - for _, instance := range instances { - if err := zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ + for i, instance := range instances { + if err := zoneGetter.nodeLister.Add(&apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: instance, + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, }, Spec: apiv1.NodeSpec{ ProviderID: fmt.Sprintf("gce://foo-project/%s/instance1", newZone), + PodCIDR: fmt.Sprintf("10.100.%d.0/24", i), + PodCIDRs: []string{fmt.Sprintf("10.100.%d.0/24", i)}, }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ @@ -79,14 +90,14 @@ func AddFakeNodes(zoneGetter *ZoneGetter, newZone string, instances ...string) e // AddFakeNode adds fake node to the ZoneGetter. func AddFakeNode(zoneGetter *ZoneGetter, node *apiv1.Node) error { - if err := zoneGetter.nodeInformer.GetIndexer().Add(node); err != nil { + if err := zoneGetter.nodeLister.Add(node); err != nil { return err } return nil } // PopulateFakeNodeInformer populates a fake node informer with fake nodes. -func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { +func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer, addMSCNodes bool) { // Ready nodes. 
if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -94,6 +105,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone1/instance1", + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.1.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -119,6 +132,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone1/instance2", + PodCIDR: "10.100.2.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.2.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -144,6 +159,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone2/instance3", + PodCIDR: "10.100.3.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.3.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -169,6 +186,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone2/instance4", + PodCIDR: "10.100.4.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.4.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -194,6 +213,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone2/instance5", + PodCIDR: "10.100.5.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.5.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -219,6 +240,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone2/instance6", + PodCIDR: "10.100.6.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.6.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -245,6 +268,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone3/unready-instance1", + PodCIDR: "10.100.7.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.7.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -270,6 +295,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone3/unready-instance2", + PodCIDR: "10.100.8.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.8.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -299,6 +326,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone4/upgrade-instance1", + PodCIDR: "10.100.9.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.9.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -327,6 +356,8 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/zone4/upgrade-instance2", + PodCIDR: "10.100.10.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.10.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -351,6 +382,13 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "instance-empty-providerID", + Labels: map[string]string{ + "operation.gke.io/type": "drain", + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.11.0/24", + PodCIDRs: []string{"a:b::/48", 
"10.100.11.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -375,9 +413,14 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "instance-invalid-providerID", + Labels: map[string]string{ + "operation.gke.io/type": "drain", + }, }, Spec: apiv1.NodeSpec{ ProviderID: "gce://foo-project/instance-invalid-providerID", + PodCIDR: "10.100.12.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.12.0/24"}, }, Status: apiv1.NodeStatus{ Addresses: []apiv1.NodeAddress{ @@ -394,6 +437,153 @@ func PopulateFakeNodeInformer(nodeInformer cache.SharedIndexInformer) { }, }, }); err != nil { - fmt.Printf("Failed to add node instance-empty-providerID: %v\n", err) + fmt.Printf("Failed to add node instance-invalid-providerID: %v\n", err) + } + + // Node with empty zone in providerID. + // This should not affect any tests since the zoneGetter will ignore this. + if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "instance-empty-zone-providerID", + }, + Spec: apiv1.NodeSpec{ + ProviderID: "gce://foo-project//instance-empty-zone-providerID", + PodCIDR: "10.100.13.0/24", + PodCIDRs: []string{"a:b::/48", "10.100.13.0/24"}, + }, + Status: apiv1.NodeStatus{ + Addresses: []apiv1.NodeAddress{ + { + Type: apiv1.NodeInternalIP, + Address: fmt.Sprintf("1.2.3.12"), + }, + }, + Conditions: []apiv1.NodeCondition{ + { + Type: apiv1.NodeReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + }); err != nil { + fmt.Printf("Failed to add node instance-empty-zone-providerID: %v\n", err) + } + + if addMSCNodes { + // Add a node does not have subnet label. + if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-subnet-label-instance", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + ProviderID: "gce://foo-project/zone5/default-subnet-label-instance", + PodCIDR: "10.101.1.0/24", + PodCIDRs: []string{"a:b::/48", "10.101.1.0/24"}, + }, + Status: apiv1.NodeStatus{ + Addresses: []apiv1.NodeAddress{ + { + Type: apiv1.NodeInternalIP, + Address: fmt.Sprintf("1.3.3.1"), + }, + }, + Conditions: []apiv1.NodeCondition{ + { + Type: apiv1.NodeReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + }); err != nil { + fmt.Printf("Failed to add node default-subnet-label-instance: %v\n", err) + } + // Add a node with empty subnet label. + if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-subnet-label-instance", + Labels: map[string]string{ + utils.LabelNodeSubnet: "", + }, + }, + Spec: apiv1.NodeSpec{ + ProviderID: "gce://foo-project/zone6/empty-subnet-label-instance", + PodCIDR: "10.101.2.0/24", + PodCIDRs: []string{"a:b::/48", "10.101.2.0/24"}, + }, + Status: apiv1.NodeStatus{ + Addresses: []apiv1.NodeAddress{ + { + Type: apiv1.NodeInternalIP, + Address: fmt.Sprintf("1.3.3.2"), + }, + }, + Conditions: []apiv1.NodeCondition{ + { + Type: apiv1.NodeReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + }); err != nil { + fmt.Printf("Failed to add node empty-subnet-label-instance: %v\n", err) + } + // Add a node with PodCIDR not set. 
+ if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-podcidr-instance", + }, + Spec: apiv1.NodeSpec{ + ProviderID: "gce://foo-project/zone7/no-podcidr-instance", + }, + Status: apiv1.NodeStatus{ + Addresses: []apiv1.NodeAddress{ + { + Type: apiv1.NodeInternalIP, + Address: fmt.Sprintf("1.3.3.3"), + }, + }, + Conditions: []apiv1.NodeCondition{ + { + Type: apiv1.NodeReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + }); err != nil { + fmt.Printf("Failed to add node no-podcidr-instance: %v\n", err) + } + // Add a node that is not in the default subnet and a pod corresponds to this node. + if err := nodeInformer.GetIndexer().Add(&apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-default-subnet-instance", + Labels: map[string]string{ + utils.LabelNodeSubnet: nonDefaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + ProviderID: "gce://foo-project/zone8/non-default-subnet-instance", + PodCIDR: "10.200.1.0/24", + PodCIDRs: []string{"a:b::/48", "10.200.1.0/24"}, + }, + Status: apiv1.NodeStatus{ + Addresses: []apiv1.NodeAddress{ + { + Type: apiv1.NodeInternalIP, + Address: fmt.Sprintf("2.2.3.1"), + }, + }, + Conditions: []apiv1.NodeCondition{ + { + Type: apiv1.NodeReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + }); err != nil { + fmt.Printf("Failed to add node non-default-subnet-instance: %v\n", err) + } } } diff --git a/pkg/utils/zonegetter/zone_getter.go b/pkg/utils/zonegetter/zone_getter.go index 02adf94353..c0c13957c2 100644 --- a/pkg/utils/zonegetter/zone_getter.go +++ b/pkg/utils/zonegetter/zone_getter.go @@ -27,6 +27,7 @@ import ( listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/utils" "k8s.io/klog/v2" ) @@ -49,29 +50,10 @@ const ( CandidateAndUnreadyNodesFilter = Filter("CandidateAndUnreadyNodesFilter") ) -// nodeConditionPredicate is a function that indicates whether the given node's conditions meet -// some set of criteria defined by the function. -type nodeConditionPredicate func(*api_v1.Node, klog.Logger) bool - -var ( - // allNodesPredicate selects all nodes. - allNodesPredicate = func(*api_v1.Node, klog.Logger) bool { return true } - // candidateNodesPredicate selects all nodes that are in ready state and devoid of any exclude labels. - // This is a duplicate definition of the function in: - // https://github.com/kubernetes/kubernetes/blob/3723713c550f649b6ba84964edef9da6cc334f9d/staging/src/k8s.io/cloud-provider/controllers/service/controller.go#L668 - candidateNodesPredicate = func(node *api_v1.Node, logger klog.Logger) bool { - return nodePredicateInternal(node, false, false, logger) - } - // candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes selects all nodes except ones that are upgrading and/or have any exclude labels. This function tolerates unready nodes. - // TODO(prameshj) - Once the kubernetes/kubernetes Predicate function includes Unready nodes and the GKE nodepool code sets exclude labels on upgrade, this can be replaced with CandidateNodesPredicate. 
- candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes = func(node *api_v1.Node, logger klog.Logger) bool { - return nodePredicateInternal(node, true, true, logger) - } -) - var ErrProviderIDNotFound = errors.New("providerID not found") var ErrSplitProviderID = errors.New("error splitting providerID") var ErrNodeNotFound = errors.New("node not found") +var ErrNodeNotInDefaultSubnet = errors.New("Node not in default subnet") // providerIDRE is the regex to process providerID. // A providerID is build out of '${ProviderName}://${project-id}/${zone}/${instance-name}' @@ -79,7 +61,7 @@ var providerIDRE = regexp.MustCompile(`^` + "gce" + `://([^/]+)/([^/]+)/([^/]+)$ // ZoneGetter manages lookups for GCE instances to zones. type ZoneGetter struct { - nodeInformer cache.SharedIndexInformer + nodeLister cache.Indexer // Mode indicates if the ZoneGetter is in GCP or Non-GCP mode // GCP mode ZoneGetter fetches zones from k8s node resource objects. // Non-GCP mode ZoneGetter always return its one single stored zone @@ -87,6 +69,12 @@ type ZoneGetter struct { // singleStoredZone is the single stored zone in the zoneGetter // It is only used in Non-GCP mode. singleStoredZone string + + // Whether zoneGetter should only list default subnet nodes. + onlyIncludeDefaultSubnetNodes bool + + // The subnetURL of the cluster's default subnet. + defaultSubnetURL string } // ZoneForNode returns the zone for a given node by looking up providerID. @@ -97,19 +85,23 @@ func (z *ZoneGetter) ZoneForNode(name string, logger klog.Logger) (string, error return z.singleStoredZone, nil } - nodeLister := z.nodeInformer.GetIndexer() - node, err := listers.NewNodeLister(nodeLister).Get(name) + nodeLogger := logger.WithValues("nodeName", name) + node, err := listers.NewNodeLister(z.nodeLister).Get(name) if err != nil { - logger.Error(err, "Failed to get node", "nodeName", name) + nodeLogger.Error(err, "Failed to get node") return "", fmt.Errorf("%w: failed to get node %s", ErrNodeNotFound, name) } + if z.onlyIncludeDefaultSubnetNodes && !isNodeInDefaultSubnet(node, z.defaultSubnetURL, nodeLogger) { + nodeLogger.Error(ErrNodeNotInDefaultSubnet, "Node is invalid since it is not in the default subnet") + return "", ErrNodeNotInDefaultSubnet + } if node.Spec.ProviderID == "" { - logger.Error(ErrProviderIDNotFound, "Node does not have providerID", "nodeName", name) + nodeLogger.Error(ErrProviderIDNotFound, "Node does not have providerID") return "", ErrProviderIDNotFound } zone, err := getZone(node) if err != nil { - logger.Error(err, "Failed to get zone from the providerID", "nodeName", name) + nodeLogger.Error(err, "Failed to get zone from the providerID") return "", err } return zone, nil @@ -117,14 +109,30 @@ func (z *ZoneGetter) ZoneForNode(name string, logger klog.Logger) (string, error // ListNodes returns a list of nodes that satisfy the given node filtering mode. 
func (z *ZoneGetter) ListNodes(filter Filter, logger klog.Logger) ([]*api_v1.Node, error) { - nodeLister := z.nodeInformer.GetIndexer() - logger.Info("Listing nodes", "filter", filter) - nodes, err := listNodesWithFilter(listers.NewNodeLister(nodeLister), filter, logger) + filterLogger := logger.WithValues("filter", filter) + filterLogger.Info("Listing nodes") + + nodes, err := listers.NewNodeLister(z.nodeLister).List(labels.Everything()) if err != nil { - logger.Error(err, "Failed to list nodes") - return []*api_v1.Node{}, err + filterLogger.Error(err, "Failed to list all nodes") + return nil, err + } + + var selected []*api_v1.Node + var filteredOut []string + for _, node := range nodes { + // Filter nodes with predicate and exclude nodes without providerID + if z.IsNodeSelectedByFilter(node, filter, filterLogger) { + selected = append(selected, node) + } else { + filteredOut = append(filteredOut, node.Name) + } + } + if len(filteredOut) <= 50 { + filterLogger.Info("Filtered out nodes when listing node zones", "nodes", filteredOut) } - return nodes, nil + + return selected, nil } // ListZones returns a list of zones containing nodes that satisfy the given @@ -135,17 +143,18 @@ func (z *ZoneGetter) ListZones(filter Filter, logger klog.Logger) ([]string, err return []string{z.singleStoredZone}, nil } - logger.Info("Listing zones", "filter", filter) + filterLogger := logger.WithValues("filter", filter) + filterLogger.Info("Listing zones") nodes, err := z.ListNodes(filter, logger) if err != nil { - logger.Error(err, "Failed to list nodes", "filter", filter) + filterLogger.Error(err, "Failed to list nodes") return []string{}, err } zones := sets.String{} for _, n := range nodes { zone, err := getZone(n) if err != nil { - logger.Error(err, "Failed to get zone from providerID", "nodeName", n.Name) + filterLogger.Error(err, "Failed to get zone from providerID", "nodeName", n.Name) continue } zones.Insert(zone) @@ -153,77 +162,50 @@ func (z *ZoneGetter) ListZones(filter Filter, logger klog.Logger) ([]string, err return zones.List(), nil } -func (z *ZoneGetter) CheckNodeWithPredicate(node *api_v1.Node, filter Filter, logger klog.Logger) bool { - pred, err := getPredicate(filter) - if err != nil { - logger.Error(err, "Failed to get predicate", "filter", filter) +// IsNodeSelectedByFilter checks if the node matches the node filter mode. +func (z *ZoneGetter) IsNodeSelectedByFilter(node *api_v1.Node, filter Filter, filterLogger klog.Logger) bool { + nodeAndFilterLogger := filterLogger.WithValues("nodeName", node.Name) + switch filter { + case AllNodesFilter: + return z.allNodesPredicate(node, nodeAndFilterLogger) + case CandidateNodesFilter: + return z.candidateNodesPredicate(node, nodeAndFilterLogger) + case CandidateAndUnreadyNodesFilter: + return z.candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes(node, nodeAndFilterLogger) + default: return false } - return pred(node, logger) } -// listNodesWithFilter gets nodes that matches node filter mode. -func listNodesWithFilter(nodeLister listers.NodeLister, filter Filter, logger klog.Logger) ([]*api_v1.Node, error) { - logger.Info("Filtering nodes", "filter", filter) - - predicate, err := getPredicate(filter) - if err != nil { - logger.Error(err, "Failed to get predicate", "filter", filter) - return nil, err - } - nodes, err := nodeLister.List(labels.Everything()) - if err != nil { - logger.Error(err, "Failed to list all nodes") - return nil, err - } +// allNodesPredicate selects all nodes. 
+func (z *ZoneGetter) allNodesPredicate(node *api_v1.Node, nodeLogger klog.Logger) bool { + if z.onlyIncludeDefaultSubnetNodes && !isNodeInDefaultSubnet(node, z.defaultSubnetURL, nodeLogger) { + nodeLogger.Error(ErrNodeNotInDefaultSubnet, "Ignoring node since it is not in the default subnet") + return false - var filtered []*api_v1.Node - var filteredOut []string - for _, node := range nodes { - // Filter nodes with predicate and exclude nodes without providerID - if predicate(node, logger) { - filtered = append(filtered, node) - } else { - filteredOut = append(filteredOut, node.Name) - } - } - if len(filteredOut) <= 50 { - logger.Info("Filtered out nodes when listing node zones", "nodes", filteredOut, "filter", filter) } + return true +} - return filtered, nil +// candidateNodesPredicate selects all nodes that are in ready state and devoid of any exclude labels. +// This is a duplicate definition of the function in: +// https://github.com/kubernetes/kubernetes/blob/3723713c550f649b6ba84964edef9da6cc334f9d/staging/src/k8s.io/cloud-provider/controllers/service/controller.go#L668 +func (z *ZoneGetter) candidateNodesPredicate(node *api_v1.Node, nodeAndFilterLogger klog.Logger) bool { + return z.nodePredicateInternal(node, false, false, nodeAndFilterLogger) } -func getPredicate(filter Filter) (nodeConditionPredicate, error) { - switch filter { - case AllNodesFilter: - return allNodesPredicate, nil - case CandidateNodesFilter: - return candidateNodesPredicate, nil - case CandidateAndUnreadyNodesFilter: - return candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes, nil - default: - return nil, fmt.Errorf("No matching predicate for filter %s", filter) - } +// candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes selects all nodes except ones that are upgrading and/or have any exclude labels. This function tolerates unready nodes. +// TODO(prameshj) - Once the kubernetes/kubernetes Predicate function includes Unready nodes and the GKE nodepool code sets exclude labels on upgrade, this can be replaced with CandidateNodesPredicate. +func (z *ZoneGetter) candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes(node *api_v1.Node, nodeAndFilterLogger klog.Logger) bool { + return z.nodePredicateInternal(node, true, true, nodeAndFilterLogger) } -// getZone gets zone information from node provider id. 
-// A providerID is build out of '${ProviderName}://${project-id}/${zone}/${instance-name}' -func getZone(node *api_v1.Node) (string, error) { - if node.Spec.ProviderID == "" { - return "", fmt.Errorf("%w: node %s does not have providerID", ErrProviderIDNotFound, node.Name) - } - matches := providerIDRE.FindStringSubmatch(node.Spec.ProviderID) - if len(matches) != 4 { - return "", fmt.Errorf("%w: providerID %q of node %s is not valid", ErrSplitProviderID, node.Spec.ProviderID, node.Name) - } - if matches[2] == "" { - return "", fmt.Errorf("%w: node %s has an empty zone", ErrSplitProviderID, node.Name) +func (z *ZoneGetter) nodePredicateInternal(node *api_v1.Node, includeUnreadyNodes, excludeUpgradingNodes bool, nodeAndFilterLogger klog.Logger) bool { + if z.onlyIncludeDefaultSubnetNodes && !isNodeInDefaultSubnet(node, z.defaultSubnetURL, nodeAndFilterLogger) { + nodeAndFilterLogger.Error(ErrNodeNotInDefaultSubnet, "Ignoring node since it is not in the default subnet") + return false } - return matches[2], nil -} -func nodePredicateInternal(node *api_v1.Node, includeUnreadyNodes, excludeUpgradingNodes bool, logger klog.Logger) bool { // Get all nodes that have a taint with NoSchedule effect for _, taint := range node.Spec.Taints { if taint.Key == utils.ToBeDeletedTaint { @@ -263,12 +245,69 @@ func nodePredicateInternal(node *api_v1.Node, includeUnreadyNodes, excludeUpgrad // We consider the node for load balancing only when its NodeReady condition status // is ConditionTrue if cond.Type == api_v1.NodeReady && cond.Status != api_v1.ConditionTrue { - logger.V(4).Info("Ignoring node", "nodeName", node.Name, "conditionType", cond.Type, "conditionStatus", cond.Status) + nodeAndFilterLogger.V(4).Info("Ignoring node", "conditionType", cond.Type, "conditionStatus", cond.Status) return false } } return true +} +// IsDefaultSubnetNode returns whether the given node is in the default subnet. +func (z *ZoneGetter) IsDefaultSubnetNode(nodeName string, logger klog.Logger) bool { + nodeLogger := logger.WithValues("nodeName", nodeName) + node, err := listers.NewNodeLister(z.nodeLister).Get(nodeName) + if err != nil { + nodeLogger.Error(err, "Failed to get node") + return false + } + return isNodeInDefaultSubnet(node, z.defaultSubnetURL, nodeLogger) +} + +// isNodeInDefaultSubnet checks if the node is in the default subnet. +// +// For any new nodes created after multi-subnet cluster is enabled, they are +// guaranteed to have the subnet label if PodCIDR is populated. For any +// existing nodes, they will not have the label and can only be in the default +// subnet.
+func isNodeInDefaultSubnet(node *api_v1.Node, defaultSubnetURL string, nodeLogger klog.Logger) bool { + if node.Spec.PodCIDR == "" { + nodeLogger.Info("Node does not have PodCIDR set") + return false + } + + nodeSubnet, exist := node.Labels[utils.LabelNodeSubnet] + if !exist { + nodeLogger.Info("Node does not have subnet label key, assumed to be in the default subnet", "subnet label key", utils.LabelNodeSubnet) + return true + } + if nodeSubnet == "" { + nodeLogger.Info("Node has empty value for subnet label key, assumed to be in the default subnet", "subnet label key", utils.LabelNodeSubnet) + return true + } + nodeLogger.Info("Node has a non-empty value for subnet label key", "subnet label key", utils.LabelNodeSubnet, "subnet label value", nodeSubnet) + + defaultSubnet, err := utils.KeyName(defaultSubnetURL) + if err != nil { + nodeLogger.Error(err, "Failed to extract default subnet information from URL", "defaultSubnetURL", defaultSubnetURL) + return false + } + return nodeSubnet == defaultSubnet +} + +// getZone gets zone information from node provider id. +// A providerID is built out of '${ProviderName}://${project-id}/${zone}/${instance-name}' +func getZone(node *api_v1.Node) (string, error) { + if node.Spec.ProviderID == "" { + return "", fmt.Errorf("%w: node %s does not have providerID", ErrProviderIDNotFound, node.Name) + } + matches := providerIDRE.FindStringSubmatch(node.Spec.ProviderID) + if len(matches) != 4 { + return "", fmt.Errorf("%w: providerID %q of node %s is not valid", ErrSplitProviderID, node.Spec.ProviderID, node.Name) + } + if matches[2] == "" { + return "", fmt.Errorf("%w: node %s has an empty zone", ErrSplitProviderID, node.Name) + } + return matches[2], nil } // NewNonGCPZoneGetter initialize a ZoneGetter in Non-GCP mode. @@ -280,9 +319,21 @@ func NewNonGCPZoneGetter(zone string) *ZoneGetter { } // NewZoneGetter initialize a ZoneGetter in GCP mode. -func NewZoneGetter(nodeInformer cache.SharedIndexInformer) *ZoneGetter { +func NewZoneGetter(nodeInformer cache.SharedIndexInformer, defaultSubnetURL string) *ZoneGetter { + return &ZoneGetter{ + mode: GCP, + nodeLister: nodeInformer.GetIndexer(), + onlyIncludeDefaultSubnetNodes: flags.F.EnableMultiSubnetCluster, + defaultSubnetURL: defaultSubnetURL, + } +} + +// NewFakeZoneGetter initializes a fake ZoneGetter in GCP mode for use in tests.
+func NewFakeZoneGetter(nodeInformer cache.SharedIndexInformer, defaultSubnetURL string, onlyIncludeDefaultSubnetNodes bool) *ZoneGetter { return &ZoneGetter{ - mode: GCP, - nodeInformer: nodeInformer, + mode: GCP, + nodeLister: nodeInformer.GetIndexer(), + onlyIncludeDefaultSubnetNodes: onlyIncludeDefaultSubnetNodes, + defaultSubnetURL: defaultSubnetURL, } } diff --git a/pkg/utils/zonegetter/zone_getter_test.go b/pkg/utils/zonegetter/zone_getter_test.go index 22ae076aac..57217da87f 100644 --- a/pkg/utils/zonegetter/zone_getter_test.go +++ b/pkg/utils/zonegetter/zone_getter_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + api_v1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-gce/pkg/utils" @@ -31,119 +32,56 @@ import ( func TestListZones(t *testing.T) { t.Parallel() - fakeNodeInformer := FakeNodeInformer() - zoneGetter := NewZoneGetter(fakeNodeInformer) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeWithProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project/us-central1-a/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "UnReadyNodeWithProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project/us-central1-b/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionFalse, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeWithoutProviderID", - }, - Spec: apiv1.NodeSpec{}, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "UnReadyNodeWithoutProviderID", - }, - Spec: apiv1.NodeSpec{}, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionFalse, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeInvalidProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://us-central1-c/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "UpgradingNodeWithProviderID", - Labels: map[string]string{ - "operation.gke.io/type": "drain", - }, - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project/us-central1-f/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionFalse, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeWithEmptyZone", + + nodeInformer := FakeNodeInformer() + PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + testCases := []struct { + desc string + filter Filter + expectLen int + }{ + { + desc: "List with AllNodesFilter", + filter: AllNodesFilter, + expectLen: 4, }, - Spec: apiv1.NodeSpec{ - ProviderID: 
"gce://foo-project//bar-node", + { + desc: "List with CandidateNodesFilter", + filter: CandidateNodesFilter, + expectLen: 3, }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, + { + desc: "List with CandidateAndUnreadyNodesFilter", + filter: CandidateAndUnreadyNodesFilter, + expectLen: 3, }, - }) + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + for _, enableMultiSubnetCluster := range []bool{true, false} { + zoneGetter.onlyIncludeDefaultSubnetNodes = enableMultiSubnetCluster + zones, _ := zoneGetter.ListZones(tc.filter, klog.TODO()) + if len(zones) != tc.expectLen { + t.Errorf("For test case %q with onlyIncludeDefaultSubnetNodes = %v, got %d zones, want %d zones", tc.desc, enableMultiSubnetCluster, len(zones), tc.expectLen) + } + for _, zone := range zones { + if zone == "" { + t.Errorf("For test case %q with onlyIncludeDefaultSubnetNodes = %v, got an empty zone,", tc.desc, enableMultiSubnetCluster) + } + } + } + }) + } +} + +func TestListZonesMultipleSubnets(t *testing.T) { + t.Parallel() + + nodeInformer := FakeNodeInformer() + PopulateFakeNodeInformer(nodeInformer, true) + zoneGetter := NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, true) testCases := []struct { desc string @@ -153,17 +91,17 @@ func TestListZones(t *testing.T) { { desc: "List with AllNodesFilter", filter: AllNodesFilter, - expectLen: 3, + expectLen: 6, }, { desc: "List with CandidateNodesFilter", filter: CandidateNodesFilter, - expectLen: 1, + expectLen: 5, }, { desc: "List with CandidateAndUnreadyNodesFilter", filter: CandidateAndUnreadyNodesFilter, - expectLen: 2, + expectLen: 5, }, } @@ -171,133 +109,64 @@ func TestListZones(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { zones, _ := zoneGetter.ListZones(tc.filter, klog.TODO()) if len(zones) != tc.expectLen { - t.Errorf("For test case %q, got %d zones, want %d,", tc.desc, len(zones), tc.expectLen) + t.Errorf("For test case %q with multi subnet cluster enabled, got %d zones, want %d zones", tc.desc, len(zones), tc.expectLen) } for _, zone := range zones { if zone == "" { - t.Errorf("For test case %q, got an empty zone,", tc.desc) + t.Errorf("For test case %q with multi subnet cluster enabled, got an empty zone,", tc.desc) } } }) - } } func TestListNodes(t *testing.T) { t.Parallel() - fakeNodeInformer := FakeNodeInformer() - zoneGetter := NewZoneGetter(fakeNodeInformer) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeWithProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project/us-central1-a/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "UnReadyNodeWithProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project/us-central1-b/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionFalse, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeWithoutProviderID", - }, - Spec: apiv1.NodeSpec{}, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "UnReadyNodeWithoutProviderID", - }, - Spec: apiv1.NodeSpec{}, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionFalse, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeInvalidProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://us-central1-c/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "UpgradingNodeWithProviderID", - Labels: map[string]string{ - "operation.gke.io/type": "drain", - }, - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project/us-central1-f/bar-node", - }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionFalse, - }, - }, - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ReadyNodeWithEmptyZone", + + nodeInformer := FakeNodeInformer() + PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) + + testCases := []struct { + desc string + filter Filter + expectLen int + }{ + { + desc: "List with AllNodesFilter", + filter: AllNodesFilter, + expectLen: 13, }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project//bar-node", + { + desc: "List with CandidateNodesFilter", + filter: CandidateNodesFilter, + expectLen: 11, }, - Status: apiv1.NodeStatus{ - Conditions: []apiv1.NodeCondition{ - { - Type: apiv1.NodeReady, - Status: apiv1.ConditionTrue, - }, - }, + { + desc: "List with CandidateAndUnreadyNodesFilter", + filter: CandidateAndUnreadyNodesFilter, + expectLen: 9, }, - }) + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + for _, enableMultiSubnetCluster := range []bool{true, false} { + zoneGetter.onlyIncludeDefaultSubnetNodes = enableMultiSubnetCluster + nodes, _ := zoneGetter.ListNodes(tc.filter, klog.TODO()) + if len(nodes) != tc.expectLen { + t.Errorf("For test case %q with onlyIncludeDefaultSubnetNodes = %v, got %d nodes, want %d,", tc.desc, enableMultiSubnetCluster, len(nodes), tc.expectLen) + } + } + }) + } +} + +func TestListNodesMultipleSubnets(t *testing.T) { + t.Parallel() + + nodeInformer := FakeNodeInformer() + PopulateFakeNodeInformer(nodeInformer, true) + zoneGetter := NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, true) testCases := []struct { desc string @@ -307,65 +176,35 @@ func TestListNodes(t *testing.T) { { desc: "List with AllNodesFilter", filter: AllNodesFilter, - expectLen: 7, + expectLen: 15, }, { desc: "List with CandidateNodesFilter", filter: CandidateNodesFilter, - expectLen: 4, + expectLen: 13, }, { desc: "List with CandidateAndUnreadyNodesFilter", filter: CandidateAndUnreadyNodesFilter, - expectLen: 6, + expectLen: 11, }, } - for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { nodes, _ := zoneGetter.ListNodes(tc.filter, klog.TODO()) if len(nodes) != tc.expectLen { - t.Errorf("For test case %q, got %d nodes, want %d,", tc.desc, len(nodes), tc.expectLen) + t.Errorf("For test case %q with multi-subnet cluster enabled, got %d nodes, want %d,", tc.desc, len(nodes), tc.expectLen) } }) } - } func TestZoneForNode(t *testing.T) { t.Parallel() - fakeNodeInformer := FakeNodeInformer() - zoneGetter := 
NewZoneGetter(fakeNodeInformer) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "NodeWithValidProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project/us-central1-a/bar-node", - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "NodeWithInvalidProviderID", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://us-central1-a/bar-node", - }, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "NodeWithNoProviderID", - }, - Spec: apiv1.NodeSpec{}, - }) - zoneGetter.nodeInformer.GetIndexer().Add(&apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "NodeWithEmptyZone", - }, - Spec: apiv1.NodeSpec{ - ProviderID: "gce://foo-project//bar-node", - }, - }) + + nodeInformer := FakeNodeInformer() + PopulateFakeNodeInformer(nodeInformer, false) + zoneGetter := NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false) testCases := []struct { desc string @@ -381,37 +220,92 @@ func TestZoneForNode(t *testing.T) { }, { desc: "Node with valid provider ID", - nodeName: "NodeWithValidProviderID", - expectZone: "us-central1-a", + nodeName: "instance1", + expectZone: "zone1", expectErr: nil, }, { desc: "Node with invalid provider ID", - nodeName: "NodeWithInvalidProviderID", + nodeName: "instance-invalid-providerID", expectZone: "", expectErr: ErrSplitProviderID, }, { desc: "Node with no provider ID", - nodeName: "NodeWithNoProviderID", + nodeName: "instance-empty-providerID", expectZone: "", expectErr: ErrProviderIDNotFound, }, { desc: "Node with empty zone in providerID", - nodeName: "NodeWithEmptyZone", + nodeName: "instance-empty-zone-providerID", expectZone: "", expectErr: ErrSplitProviderID, }, } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + for _, enableMultiSubnetCluster := range []bool{true, false} { + zoneGetter.onlyIncludeDefaultSubnetNodes = enableMultiSubnetCluster + zone, err := zoneGetter.ZoneForNode(tc.nodeName, klog.TODO()) + if zone != tc.expectZone { + t.Errorf("For test case %q with onlyIncludeDefaultSubnetNodes = %v , got zone: %s, want: %s,", tc.desc, enableMultiSubnetCluster, zone, tc.expectZone) + } + if !errors.Is(err, tc.expectErr) { + t.Errorf("For test case %q with onlyIncludeDefaultSubnetNodes = %v, got error: %s, want: %s,", tc.desc, enableMultiSubnetCluster, err, tc.expectErr) + } + + } + }) + } +} + +func TestZoneForNodeMultipleSubnets(t *testing.T) { + t.Parallel() + + nodeInformer := FakeNodeInformer() + PopulateFakeNodeInformer(nodeInformer, true) + zoneGetter := NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, true) + + testCases := []struct { + desc string + nodeName string + expectZone string + expectErr error + }{ + { + desc: "Node with default Subnet Label", + nodeName: "default-subnet-label-instance", + expectZone: "zone5", + expectErr: nil, + }, + { + desc: "Node with empty Subnet Label", + nodeName: "empty-subnet-label-instance", + expectZone: "zone6", + expectErr: nil, + }, + { + desc: "Node without PodCIDR", + nodeName: "no-podcidr-instance", + expectZone: "", + expectErr: ErrNodeNotInDefaultSubnet, + }, + { + desc: "Node in non-default subnet", + nodeName: "non-default-subnet-instance", + expectZone: "", + expectErr: ErrNodeNotInDefaultSubnet, + }, + } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { zone, err := zoneGetter.ZoneForNode(tc.nodeName, klog.TODO()) if zone != tc.expectZone { - t.Errorf("For test case %q, got zone: %s, want: %s,", 
tc.desc, zone, tc.expectZone) + t.Errorf("For test case %q with multi-subnet cluster enabled, got zone: %s, want: %s,", tc.desc, zone, tc.expectZone) } if !errors.Is(err, tc.expectErr) { - t.Errorf("For test case %q, got error: %s, want: %s,", tc.desc, err, tc.expectErr) + t.Errorf("For test case %q with multi-subnet cluster enabled, got error: %s, want: %s,", tc.desc, err, tc.expectErr) } }) } @@ -501,59 +395,102 @@ func TestNonGCPZoneGetter(t *testing.T) { validateGetZoneForNode("bar-node") } -func TestGetNodeConditionPredicate(t *testing.T) { - tests := []struct { - node apiv1.Node - expectAccept, expectAcceptByUnreadyNodePredicate bool - name string +func TestIsNodeSelectedByFilter(t *testing.T) { + fakeNodeInformer := FakeNodeInformer() + zoneGetter := NewFakeZoneGetter(fakeNodeInformer, defaultTestSubnetURL, true) + + testCases := []struct { + node apiv1.Node + expectAcceptByAll bool + expectAcceptByCandidate bool + expectAcceptByUnready bool + name string }{ { - node: apiv1.Node{}, - expectAccept: false, - - name: "empty", + node: apiv1.Node{}, + expectAcceptByAll: false, + expectAcceptByCandidate: false, + expectAcceptByUnready: false, + name: "empty", }, { node: apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ {Type: apiv1.NodeReady, Status: apiv1.ConditionTrue}, }, }, }, - expectAccept: true, - expectAcceptByUnreadyNodePredicate: true, - name: "ready node", + expectAcceptByAll: true, + expectAcceptByCandidate: true, + expectAcceptByUnready: true, + name: "ready node", }, { node: apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ {Type: apiv1.NodeReady, Status: apiv1.ConditionFalse}, }, }, }, - expectAccept: false, - expectAcceptByUnreadyNodePredicate: true, - name: "unready node", + expectAcceptByAll: true, + expectAcceptByCandidate: false, + expectAcceptByUnready: true, + name: "unready node", }, { node: apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ {Type: apiv1.NodeReady, Status: apiv1.ConditionUnknown}, }, }, }, - expectAccept: false, - expectAcceptByUnreadyNodePredicate: true, - name: "ready status unknown", + expectAcceptByAll: true, + expectAcceptByCandidate: false, + expectAcceptByUnready: true, + name: "ready status unknown", }, { node: apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - Labels: map[string]string{utils.LabelNodeRoleExcludeBalancer: "true"}, + Name: "node1", + Labels: map[string]string{ + utils.LabelNodeRoleExcludeBalancer: "true", + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ @@ -561,9 +498,10 @@ func TestGetNodeConditionPredicate(t *testing.T) { }, }, }, - expectAccept: false, - expectAcceptByUnreadyNodePredicate: false, - name: "ready node, excluded from loadbalancers", + 
expectAcceptByAll: true, + expectAcceptByCandidate: false, + expectAcceptByUnready: false, + name: "ready node, excluded from loadbalancers", }, { node: apiv1.Node{ @@ -571,17 +509,23 @@ func TestGetNodeConditionPredicate(t *testing.T) { Name: "node1", Labels: map[string]string{ utils.GKECurrentOperationLabel: utils.NodeDrain, + utils.LabelNodeSubnet: defaultTestSubnet, }, }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ {Type: apiv1.NodeReady, Status: apiv1.ConditionTrue}, }, }, }, - expectAccept: true, - expectAcceptByUnreadyNodePredicate: false, - name: "ready node, upgrade/drain in progress", + expectAcceptByAll: true, + expectAcceptByCandidate: true, + expectAcceptByUnready: false, + name: "ready node, upgrade/drain in progress", }, { node: apiv1.Node{ @@ -589,33 +533,54 @@ func TestGetNodeConditionPredicate(t *testing.T) { Name: "node1", Labels: map[string]string{ utils.GKECurrentOperationLabel: "random", + utils.LabelNodeSubnet: defaultTestSubnet, }, }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ {Type: apiv1.NodeReady, Status: apiv1.ConditionTrue}, }, }, }, - expectAccept: true, - expectAcceptByUnreadyNodePredicate: true, - name: "ready node, non-drain operation", + expectAcceptByAll: true, + expectAcceptByCandidate: true, + expectAcceptByUnready: true, + name: "ready node, non-drain operation", }, { node: apiv1.Node{ - Spec: apiv1.NodeSpec{Unschedulable: true}, + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + Unschedulable: true, + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ {Type: apiv1.NodeReady, Status: apiv1.ConditionTrue}, }, }, }, - expectAccept: true, - expectAcceptByUnreadyNodePredicate: true, - name: "unschedulable", + expectAcceptByAll: true, + expectAcceptByCandidate: true, + expectAcceptByUnready: true, + name: "unschedulable", }, { node: apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, Spec: apiv1.NodeSpec{ Taints: []apiv1.Taint{ { @@ -624,6 +589,8 @@ func TestGetNodeConditionPredicate(t *testing.T) { Effect: apiv1.TaintEffectNoSchedule, }, }, + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, }, Status: apiv1.NodeStatus{ Conditions: []apiv1.NodeCondition{ @@ -631,70 +598,189 @@ func TestGetNodeConditionPredicate(t *testing.T) { }, }, }, - expectAccept: false, - expectAcceptByUnreadyNodePredicate: false, - name: "ToBeDeletedByClusterAutoscaler-taint", + expectAcceptByAll: true, + expectAcceptByCandidate: false, + expectAcceptByUnready: false, + name: "ToBeDeletedByClusterAutoscaler-taint", + }, + { + node: apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + utils.LabelNodeSubnet: nonDefaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, + Status: apiv1.NodeStatus{ + Conditions: []apiv1.NodeCondition{ + {Type: apiv1.NodeReady, Status: apiv1.ConditionTrue}, + }, + }, + }, + expectAcceptByAll: false, + expectAcceptByCandidate: false, + expectAcceptByUnready: false, + name: "node in non-default subnet", + }, + { + node: apiv1.Node{ + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + 
PodCIDRs: []string{"10.100.1.0/24"}, + }, + Status: apiv1.NodeStatus{ + Conditions: []apiv1.NodeCondition{ + {Type: apiv1.NodeReady, Status: apiv1.ConditionTrue}, + }, + }, + }, + expectAcceptByAll: true, + expectAcceptByCandidate: true, + expectAcceptByUnready: true, + name: "node without subnet label", + }, + { + node: apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{}, + Status: apiv1.NodeStatus{ + Conditions: []apiv1.NodeCondition{ + {Type: apiv1.NodeReady, Status: apiv1.ConditionTrue}, + }, + }, + }, + expectAcceptByAll: false, + expectAcceptByCandidate: false, + expectAcceptByUnready: false, + name: "node without PodCIDR", }, } - pred := candidateNodesPredicate - unreadyPred := candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes - for _, test := range tests { - accept := pred(&test.node, klog.TODO()) - if accept != test.expectAccept { - t.Errorf("Test failed for %s, got %v, want %v", test.name, accept, test.expectAccept) + for _, tc := range testCases { + acceptByAll := zoneGetter.IsNodeSelectedByFilter(&tc.node, AllNodesFilter, klog.TODO()) + if acceptByAll != tc.expectAcceptByAll { + t.Errorf("Test failed for %s, got %v, want %v", tc.name, acceptByAll, tc.expectAcceptByAll) + } + + acceptByCandidate := zoneGetter.IsNodeSelectedByFilter(&tc.node, CandidateNodesFilter, klog.TODO()) + if acceptByCandidate != tc.expectAcceptByCandidate { + t.Errorf("Test failed for %s, got %v, want %v", tc.name, acceptByCandidate, tc.expectAcceptByCandidate) } - unreadyAccept := unreadyPred(&test.node, klog.TODO()) - if unreadyAccept != test.expectAcceptByUnreadyNodePredicate { - t.Errorf("Test failed for unreadyNodesPredicate in case %s, got %v, want %v", test.name, unreadyAccept, test.expectAcceptByUnreadyNodePredicate) + acceptByUnready := zoneGetter.IsNodeSelectedByFilter(&tc.node, CandidateAndUnreadyNodesFilter, klog.TODO()) + if acceptByUnready != tc.expectAcceptByUnready { + t.Errorf("Test failed for unreadyNodesPredicate in case %s, got %v, want %v", tc.name, acceptByUnready, tc.expectAcceptByUnready) } } } -func TestGetPredicate(t *testing.T) { +func TestIsNodeInDefaultSubnet(t *testing.T) { t.Parallel() testCases := []struct { - desc string - filter Filter - expectPred nodeConditionPredicate - expectNil bool + desc string + node *apiv1.Node + want bool }{ - { - desc: "AllNodesFilter", - filter: AllNodesFilter, - expectPred: allNodesPredicate, - expectNil: true, + desc: "Node in the default subnet", + node: &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "NodeInDefaultSubnet", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, + }, + want: true, + }, + { + desc: "Node without PodCIDR", + node: &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "NodeWithoutPodCIDR", + Labels: map[string]string{ + utils.LabelNodeSubnet: defaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{}, + }, + want: false, }, { - desc: "CandidateNodesFilter", - filter: CandidateNodesFilter, - expectPred: candidateNodesPredicate, - expectNil: true, + desc: "Node with PodCIDR, without subnet label", + node: &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "NodeWithoutSubnetLabel", + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, + }, + want: true, }, { - desc: "CandidateAndUnreadyNodesFilter", - filter: 
CandidateAndUnreadyNodesFilter, - expectPred: candidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes, - expectNil: true, + desc: "Node with PodCIDR, with empty Label", + node: &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "NodeWithEmptyLabel", + Labels: map[string]string{ + utils.LabelNodeSubnet: "", + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, + }, + want: true, + }, { - desc: "No matching Predicate", - filter: Filter("random-filter"), - expectPred: nil, - expectNil: false, + desc: "Node in non-default subnet", + node: &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "NodeInNonDefaultSubnet", + Labels: map[string]string{ + utils.LabelNodeSubnet: nonDefaultTestSubnet, + }, + }, + Spec: apiv1.NodeSpec{ + PodCIDR: "10.100.1.0/24", + PodCIDRs: []string{"10.100.1.0/24"}, + }, + }, + want: false, }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - gotPred, gotErr := getPredicate(tc.filter) - if tc.expectNil && gotErr != nil { - t.Errorf("getPredicate(%s) = got err %v, expect nil", tc.filter, gotErr) - } - if !tc.expectNil && gotErr == nil { - t.Errorf("getPredicate(%s) = got err nil, expect non-nil", tc.filter) - } - if reflect.ValueOf(gotPred) != reflect.ValueOf(tc.expectPred) { - t.Errorf("getPredicate(%s) = got pred %v, expect %v", tc.filter, gotPred, tc.expectPred) + if got := isNodeInDefaultSubnet(tc.node, defaultTestSubnetURL, klog.TODO()); got != tc.want { + t.Errorf("isNodeInDefaultSubnet(%v, %s) = %v, want %v", tc.node, defaultTestSubnetURL, got, tc.want) } }) } diff --git a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1/gcpfirewall.go b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1/gcpfirewall.go similarity index 77% rename from vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1/gcpfirewall.go rename to vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1/gcpfirewall.go index c1cd43e0da..ed75bde1d2 100644 --- a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1/gcpfirewall.go +++ b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1/gcpfirewall.go @@ -16,27 +16,27 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT.
-package v1beta1 +package v1 import ( "context" time "time" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - gcpfirewallv1beta1 "k8s.io/cloud-provider-gcp/crd/apis/gcpfirewall/v1beta1" + gcpfirewallv1 "k8s.io/cloud-provider-gcp/crd/apis/gcpfirewall/v1" versioned "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/clientset/versioned" internalinterfaces "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/internalinterfaces" - v1beta1 "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1" + v1 "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1" ) // GCPFirewallInformer provides access to a shared informer and lister for // GCPFirewalls. type GCPFirewallInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.GCPFirewallLister + Lister() v1.GCPFirewallLister } type gCPFirewallInformer struct { @@ -57,20 +57,20 @@ func NewGCPFirewallInformer(client versioned.Interface, resyncPeriod time.Durati func NewFilteredGCPFirewallInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1beta1().GCPFirewalls().List(context.TODO(), options) + return client.NetworkingV1().GCPFirewalls().List(context.TODO(), options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1beta1().GCPFirewalls().Watch(context.TODO(), options) + return client.NetworkingV1().GCPFirewalls().Watch(context.TODO(), options) }, }, - &gcpfirewallv1beta1.GCPFirewall{}, + &gcpfirewallv1.GCPFirewall{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *gCPFirewallInformer) defaultInformer(client versioned.Interface, resync } func (f *gCPFirewallInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&gcpfirewallv1beta1.GCPFirewall{}, f.defaultInformer) + return f.factory.InformerFor(&gcpfirewallv1.GCPFirewall{}, f.defaultInformer) } -func (f *gCPFirewallInformer) Lister() v1beta1.GCPFirewallLister { - return v1beta1.NewGCPFirewallLister(f.Informer().GetIndexer()) +func (f *gCPFirewallInformer) Lister() v1.GCPFirewallLister { + return v1.NewGCPFirewallLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1/interface.go b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1/interface.go similarity index 98% rename from vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1/interface.go rename to vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1/interface.go index 376ffc96b3..1974c6bec2 100644 --- a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1/interface.go +++ 
b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1/interface.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1beta1 +package v1 import ( internalinterfaces "k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/internalinterfaces" diff --git a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1/expansion_generated.go b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1/expansion_generated.go similarity index 97% rename from vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1/expansion_generated.go rename to vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1/expansion_generated.go index efd38efca0..b6cb9e6ca7 100644 --- a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1/expansion_generated.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1beta1 +package v1 // GCPFirewallListerExpansion allows custom methods to be added to // GCPFirewallLister. diff --git a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1/gcpfirewall.go b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1/gcpfirewall.go similarity index 79% rename from vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1/gcpfirewall.go rename to vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1/gcpfirewall.go index 3c3a90a1b9..c3e7c452b3 100644 --- a/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1/gcpfirewall.go +++ b/vendor/k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1/gcpfirewall.go @@ -16,13 +16,13 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1beta1 +package v1 import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - v1beta1 "k8s.io/cloud-provider-gcp/crd/apis/gcpfirewall/v1beta1" + v1 "k8s.io/cloud-provider-gcp/crd/apis/gcpfirewall/v1" ) // GCPFirewallLister helps list GCPFirewalls. @@ -30,10 +30,10 @@ import ( type GCPFirewallLister interface { // List lists all GCPFirewalls in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.GCPFirewall, err error) + List(selector labels.Selector) (ret []*v1.GCPFirewall, err error) // Get retrieves the GCPFirewall from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.GCPFirewall, error) + Get(name string) (*v1.GCPFirewall, error) GCPFirewallListerExpansion } @@ -48,21 +48,21 @@ func NewGCPFirewallLister(indexer cache.Indexer) GCPFirewallLister { } // List lists all GCPFirewalls in the indexer. -func (s *gCPFirewallLister) List(selector labels.Selector) (ret []*v1beta1.GCPFirewall, err error) { +func (s *gCPFirewallLister) List(selector labels.Selector) (ret []*v1.GCPFirewall, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.GCPFirewall)) + ret = append(ret, m.(*v1.GCPFirewall)) }) return ret, err } // Get retrieves the GCPFirewall from the index for a given name. 
-func (s *gCPFirewallLister) Get(name string) (*v1beta1.GCPFirewall, error) { +func (s *gCPFirewallLister) Get(name string) (*v1.GCPFirewall, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("gcpfirewall"), name) + return nil, errors.NewNotFound(v1.Resource("gcpfirewall"), name) } - return obj.(*v1beta1.GCPFirewall), nil + return obj.(*v1.GCPFirewall), nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 1ef64804ce..1f02924d38 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -867,9 +867,9 @@ k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/clientset/versioned/typed/gcpfi k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/clientset/versioned/typed/gcpfirewall/v1/fake k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/clientset/versioned/typed/gcpfirewall/v1beta1 k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/clientset/versioned/typed/gcpfirewall/v1beta1/fake -k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1beta1 +k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/gcpfirewall/v1 k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/informers/externalversions/internalinterfaces -k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1beta1 +k8s.io/cloud-provider-gcp/crd/client/gcpfirewall/listers/gcpfirewall/v1 k8s.io/cloud-provider-gcp/crd/client/network/clientset/versioned k8s.io/cloud-provider-gcp/crd/client/network/clientset/versioned/fake k8s.io/cloud-provider-gcp/crd/client/network/clientset/versioned/scheme