Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Helpers removed #2577

Closed
wants to merge 19 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
e4463b5
Only emit reset log when the syncer is in error state.
sawsa307 May 13, 2024
ff059ce
Only emit errors for non-LoadBalancer IPv6 services that need NEGs
sawsa307 May 15, 2024
0461785
Add defaultSubnetURL to zoneGetter
sawsa307 May 9, 2024
a58d717
Add node subnet label LabelNodeSubnet.
sawsa307 May 13, 2024
8ac261d
Exclude non default subnet nodes in zoneGetter.
sawsa307 May 9, 2024
e36d30a
Rename and refactor test cases related to L7 endpoint calculation
sawsa307 May 13, 2024
29d806f
Refactor EnsureL4BackendService to use params struct as an argument
cezarygerard May 21, 2024
7775919
Decouple test cases in TestSyncPod
sawsa307 May 21, 2024
a55a07f
Create NewFakeZoneGetter to allow custom set of MSC flag.
sawsa307 May 20, 2024
0999372
Mark Pods from non-default subnet as ready.
sawsa307 May 22, 2024
b73af41
Unify context logging across L4 LB
FelipeYepez May 22, 2024
9686e11
Updates version of 'informerfirewall' from v1beta to v1.
maciejriedl May 23, 2024
a1c9db6
Add autogenerated files
maciejriedl May 24, 2024
cbf588d
Add autogenerated files of version v1 instead of v1beta1
maciejriedl May 24, 2024
599f25d
Add a new endpoint tracking state for non-default subnet nodes
sawsa307 May 15, 2024
b5dbc75
Add test cases when MSC flag is on and off.
sawsa307 May 22, 2024
bc88fe8
Track number of endpoints not in default subnet, and skip them in phase0
sawsa307 May 15, 2024
c2cbab3
Update TestValidateEndpoints
sawsa307 May 22, 2024
a50c93a
Update ValidateEndpoints in endpoint calculator
sawsa307 May 22, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
60 changes: 36 additions & 24 deletions pkg/backends/backends.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,18 @@ func NewPoolWithConnectionTrackingPolicy(cloud *gce.Cloud, namer namer.BackendNa
}
}

// L4BackendServiceParams encapsulates parameters for ensuring an L4 BackendService.
// It replaces the long positional argument list of EnsureL4BackendService with a
// single struct, so call sites can name each field explicitly.
type L4BackendServiceParams struct {
// Name is the name of the backend service resource to create or update.
Name string
// HealthCheckLink is the self-link of the health check attached to the backend service.
HealthCheckLink string
// Protocol is the backend service protocol, e.g. "TCP" or "UDP"; TCP enables
// connection-draining configuration, other protocols reset it.
Protocol string
// SessionAffinity is the Kubernetes session affinity value; it is translated
// to the GCE affinity type before being set on the backend service.
SessionAffinity string
// Scheme is the load balancing scheme (e.g. internal or external).
Scheme string
// NamespacedName identifies the Kubernetes Service this backend service is for;
// used in the resource description and in log messages.
NamespacedName types.NamespacedName
// NetworkInfo describes the VPC network to use. May be nil; when set and not
// the default network, its NetworkURL is placed on the backend service.
NetworkInfo *network.NetworkInfo
// ConnectionTrackingPolicy is the connection tracking configuration applied
// only when the backend pool enables it (Strong Session Affinity); may be nil.
ConnectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy
}

// ensureDescription updates the BackendService Description with the expected value
func ensureDescription(be *composite.BackendService, sp *utils.ServicePort) (needsUpdate bool) {
desc := sp.GetDescription()
Expand Down Expand Up @@ -306,45 +318,45 @@ func (b *Backends) DeleteSignedUrlKey(be *composite.BackendService, keyName stri
}

// EnsureL4BackendService creates or updates the backend service with the given name.
// TODO(code-elinka): refactor the list of arguments (there are too many now)
func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinity, scheme string, namespacedName types.NamespacedName, network network.NetworkInfo, connectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy, beLogger klog.Logger) (*composite.BackendService, error) {
func (b *Backends) EnsureL4BackendService(params L4BackendServiceParams, beLogger klog.Logger) (*composite.BackendService, error) {
start := time.Now()
beLogger.V(2).Info("EnsureL4BackendService started", "serviceKey", namespacedName, "scheme", scheme, "protocol", protocol, "sessionAffinity", sessionAffinity, "network", network.NetworkURL, "subnetwork", network.SubnetworkURL)
beLogger = beLogger.WithValues("serviceKey", params.NamespacedName, "scheme", params.Scheme, "protocol", params.Protocol, "sessionAffinity", params.SessionAffinity, "network", params.NetworkInfo)
beLogger.V(2).Info("EnsureL4BackendService started")
defer func() {
beLogger.V(2).Info("EnsureL4BackendService finished", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName, "timeTaken", time.Since(start))
beLogger.V(2).Info("EnsureL4BackendService finished", "timeTaken", time.Since(start))
}()

beLogger.V(2).Info("EnsureL4BackendService: checking existing backend service", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName)
key, err := composite.CreateKey(b.cloud, name, meta.Regional)
beLogger.V(2).Info("EnsureL4BackendService: checking existing backend service")
key, err := composite.CreateKey(b.cloud, params.Name, meta.Regional)
if err != nil {
return nil, err
}
bs, err := composite.GetBackendService(b.cloud, key, meta.VersionGA, beLogger)
if err != nil && !utils.IsNotFoundError(err) {
return nil, err
}
desc, err := utils.MakeL4LBServiceDescription(namespacedName.String(), "", meta.VersionGA, false, utils.ILB)
desc, err := utils.MakeL4LBServiceDescription(params.NamespacedName.String(), "", meta.VersionGA, false, utils.ILB)
if err != nil {
beLogger.Info("EnsureL4BackendService: Failed to generate description for BackendService", "err", err)
}
expectedBS := &composite.BackendService{
Name: name,
Protocol: protocol,
Name: params.Name,
Protocol: params.Protocol,
Description: desc,
HealthChecks: []string{hcLink},
SessionAffinity: utils.TranslateAffinityType(sessionAffinity, beLogger),
LoadBalancingScheme: scheme,
HealthChecks: []string{params.HealthCheckLink},
SessionAffinity: utils.TranslateAffinityType(params.SessionAffinity, beLogger),
LoadBalancingScheme: params.Scheme,
}
// We need this configuration only for Strong Session Affinity feature
if b.useConnectionTrackingPolicy {
beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using connection tracking policy: %+v", connectionTrackingPolicy), "serviceKey", namespacedName)
expectedBS.ConnectionTrackingPolicy = connectionTrackingPolicy
beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using connection tracking policy: %+v", params.ConnectionTrackingPolicy), "serviceKey", params.NamespacedName)
expectedBS.ConnectionTrackingPolicy = params.ConnectionTrackingPolicy
}
if !network.IsDefault {
beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using non-default network: %+v", network), "serviceKey", namespacedName)
expectedBS.Network = network.NetworkURL
if params.NetworkInfo != nil && !params.NetworkInfo.IsDefault {
beLogger.V(2).Info(fmt.Sprintf("EnsureL4BackendService: using non-default network: %+v", params.NetworkInfo))
expectedBS.Network = params.NetworkInfo.NetworkURL
}
if protocol == string(api_v1.ProtocolTCP) {
if params.Protocol == string(api_v1.ProtocolTCP) {
expectedBS.ConnectionDraining = &composite.ConnectionDraining{DrainingTimeoutSec: DefaultConnectionDrainingTimeoutSeconds}
} else {
// This config is not supported in UDP mode, explicitly set to 0 to reset, if proto was TCP previously.
Expand All @@ -353,35 +365,35 @@ func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinit

// Create backend service if none was found
if bs == nil {
beLogger.V(2).Info("EnsureL4BackendService: creating backend service", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName)
beLogger.V(2).Info("EnsureL4BackendService: creating backend service")
err := composite.CreateBackendService(b.cloud, key, expectedBS, beLogger)
if err != nil {
return nil, err
}
beLogger.V(2).Info("EnsureL4BackendService: created backend service successfully", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName)
beLogger.V(2).Info("EnsureL4BackendService: created backend service successfully")
// We need to perform a GCE call to re-fetch the object we just created
// so that the "Fingerprint" field is filled in. This is needed to update the
// object without error. The lookup is also needed to populate the selfLink.
return composite.GetBackendService(b.cloud, key, meta.VersionGA, beLogger)
}

if backendSvcEqual(expectedBS, bs, b.useConnectionTrackingPolicy) {
beLogger.V(2).Info("EnsureL4BackendService: backend service did not change, skipping update", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName)
beLogger.V(2).Info("EnsureL4BackendService: backend service did not change, skipping update")
return bs, nil
}
if bs.ConnectionDraining != nil && bs.ConnectionDraining.DrainingTimeoutSec > 0 && protocol == string(api_v1.ProtocolTCP) {
if bs.ConnectionDraining != nil && bs.ConnectionDraining.DrainingTimeoutSec > 0 && params.Protocol == string(api_v1.ProtocolTCP) {
// only preserves user overridden timeout value when the protocol is TCP
expectedBS.ConnectionDraining.DrainingTimeoutSec = bs.ConnectionDraining.DrainingTimeoutSec
}
beLogger.V(2).Info("EnsureL4BackendService: updating backend service", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName)
beLogger.V(2).Info("EnsureL4BackendService: updating backend service")
// Set fingerprint for optimistic locking
expectedBS.Fingerprint = bs.Fingerprint
// Copy backends to avoid detaching them during update. This could be replaced with a patch call in the future.
expectedBS.Backends = bs.Backends
if err := composite.UpdateBackendService(b.cloud, key, expectedBS, beLogger); err != nil {
return nil, err
}
beLogger.V(2).Info("EnsureL4BackendService: updated backend service successfully", "protocol", protocol, "scheme", scheme, "serviceKey", namespacedName)
beLogger.V(2).Info("EnsureL4BackendService: updated backend service successfully")

return composite.GetBackendService(b.cloud, key, meta.VersionGA, beLogger)
}
Expand Down
28 changes: 24 additions & 4 deletions pkg/backends/backends_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,18 @@ func TestEnsureL4BackendService(t *testing.T) {

hcLink := l4namer.L4HealthCheck(tc.serviceNamespace, tc.serviceName, false)
bsName := l4namer.L4Backend(tc.serviceNamespace, tc.serviceName)
network := network.NetworkInfo{IsDefault: false, NetworkURL: "https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"}
bs, err := backendPool.EnsureL4BackendService(bsName, hcLink, tc.protocol, tc.affinityType, tc.schemeType, namespacedName, network, tc.connectionTrackingPolicy, klog.TODO())
network := &network.NetworkInfo{IsDefault: false, NetworkURL: "https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"}
backendParams := L4BackendServiceParams{
Name: bsName,
HealthCheckLink: hcLink,
Protocol: tc.protocol,
SessionAffinity: tc.affinityType,
Scheme: tc.schemeType,
NamespacedName: namespacedName,
NetworkInfo: network,
ConnectionTrackingPolicy: tc.connectionTrackingPolicy,
}
bs, err := backendPool.EnsureL4BackendService(backendParams, klog.TODO())
if err != nil {
t.Errorf("EnsureL4BackendService failed")
}
Expand Down Expand Up @@ -162,7 +172,7 @@ func TestEnsureL4BackendServiceDoesNotDetachBackends(t *testing.T) {

hcLink := l4namer.L4HealthCheck(serviceNamespace, serviceName, false)
bsName := l4namer.L4Backend(serviceNamespace, serviceName)
network := network.NetworkInfo{IsDefault: false, NetworkURL: "https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"}
network := &network.NetworkInfo{IsDefault: false, NetworkURL: "https://www.googleapis.com/compute/v1/projects/test-poject/global/networks/test-vpc"}

backendName := "testNeg"
existingBS := &composite.BackendService{
Expand All @@ -189,7 +199,17 @@ func TestEnsureL4BackendServiceDoesNotDetachBackends(t *testing.T) {
}

var noConnectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy = nil
bs, err := backendPool.EnsureL4BackendService(bsName, hcLink, "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), namespacedName, network, noConnectionTrackingPolicy, klog.TODO())
backendParams := L4BackendServiceParams{
Name: bsName,
HealthCheckLink: hcLink,
Protocol: "TCP",
SessionAffinity: string(v1.ServiceAffinityNone),
Scheme: string(cloud.SchemeInternal),
NamespacedName: namespacedName,
NetworkInfo: network,
ConnectionTrackingPolicy: noConnectionTrackingPolicy,
}
bs, err := backendPool.EnsureL4BackendService(backendParams, klog.TODO())
if err != nil {
t.Errorf("EnsureL4BackendService failed")
}
Expand Down
17 changes: 10 additions & 7 deletions pkg/backends/ig_linker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,10 @@ import (
"k8s.io/klog/v2"
)

const defaultZone = "zone-a"
const (
defaultTestZone = "zone-a"
defaultTestSubnetURL = "https://www.googleapis.com/compute/v1/projects/proj/regions/us-central1/subnetworks/default"
)

func newTestIGLinker(fakeGCE *gce.Cloud, fakeInstancePool instancegroups.Manager) *instanceGroupLinker {
fakeBackendPool := NewPool(fakeGCE, defaultNamer)
Expand All @@ -56,8 +59,8 @@ func TestLink(t *testing.T) {
fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())

nodeInformer := zonegetter.FakeNodeInformer()
fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer)
zonegetter.AddFakeNodes(fakeZoneGetter, defaultZone, "test-instance")
fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false)
zonegetter.AddFakeNodes(fakeZoneGetter, defaultTestZone, "test-instance")

fakeNodePool := instancegroups.NewManager(&instancegroups.ManagerConfig{
Cloud: fakeIGs,
Expand All @@ -79,7 +82,7 @@ func TestLink(t *testing.T) {
// Mimic the syncer creating the backend.
linker.backendPool.Create(sp, "fake-health-check-link", klog.TODO())

if err := linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil {
if err := linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil {
t.Fatalf("%v", err)
}

Expand All @@ -98,8 +101,8 @@ func TestLinkWithCreationModeError(t *testing.T) {
fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())

nodeInformer := zonegetter.FakeNodeInformer()
fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer)
zonegetter.AddFakeNodes(fakeZoneGetter, defaultZone, "test-instance")
fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false)
zonegetter.AddFakeNodes(fakeZoneGetter, defaultTestZone, "test-instance")

fakeNodePool := instancegroups.NewManager(&instancegroups.ManagerConfig{
Cloud: fakeIGs,
Expand Down Expand Up @@ -135,7 +138,7 @@ func TestLinkWithCreationModeError(t *testing.T) {
// Mimic the syncer creating the backend.
linker.backendPool.Create(sp, "fake-health-check-link", klog.TODO())

if err := linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil {
if err := linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil {
t.Fatalf("%v", err)
}

Expand Down
20 changes: 10 additions & 10 deletions pkg/backends/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,8 @@ func newTestJig(fakeGCE *gce.Cloud) *Jig {
fakeIGs := instancegroups.NewEmptyFakeInstanceGroups()

nodeInformer := zonegetter.FakeNodeInformer()
fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer)
zonegetter.AddFakeNodes(fakeZoneGetter, defaultZone, "test-instance")
fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false)
zonegetter.AddFakeNodes(fakeZoneGetter, defaultTestZone, "test-instance")

fakeInstancePool := instancegroups.NewManager(&instancegroups.ManagerConfig{
Cloud: fakeIGs,
Expand Down Expand Up @@ -95,7 +95,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil {
t.Fatalf("Did not expect error when syncing backend with port %v", sp.NodePort)
}
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil {
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil {
t.Fatalf("Did not expect error when linking backend with port %v to groups", sp.NodePort)
}

Expand All @@ -106,8 +106,8 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
// Simulate another controller updating the same backend service with
// a different instance group
newGroups := []*compute.Backend{
{Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultZone, "k8s-ig-bar")},
{Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultZone, "k8s-ig-foo")},
{Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultTestZone, "k8s-ig-bar")},
{Group: fmt.Sprintf("zones/%s/instanceGroups/%s", defaultTestZone, "k8s-ig-foo")},
}
be.Backends = append(be.Backends, newGroups...)
if err = fakeGCE.UpdateGlobalBackendService(be); err != nil {
Expand All @@ -123,7 +123,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil {
t.Fatalf("Did not expect error when syncing backend with port %v", sp.NodePort)
}
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil {
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil {
t.Fatalf("Did not expect error when linking backend with port %v to groups", sp.NodePort)
}

Expand All @@ -141,7 +141,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
}

// seed expectedGroups with the first group native to this controller
expectedGroups := sets.NewString(fmt.Sprintf("zones/%s/instanceGroups/%s", defaultZone, "k8s-ig--uid1"))
expectedGroups := sets.NewString(fmt.Sprintf("zones/%s/instanceGroups/%s", defaultTestZone, "k8s-ig--uid1"))
for _, newGroup := range newGroups {
igPath, err := utils.ResourcePath(newGroup.Group)
if err != nil {
Expand All @@ -168,7 +168,7 @@ func TestSyncChaosMonkey(t *testing.T) {
if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil {
t.Fatalf("Did not expect error when syncing backend with port %v, err: %v", sp.NodePort, err)
}
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil {
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil {
t.Fatalf("Did not expect error when linking backend with port %v to groups, err: %v", sp.NodePort, err)
}

Expand Down Expand Up @@ -201,7 +201,7 @@ func TestSyncChaosMonkey(t *testing.T) {
if err := jig.syncer.Sync([]utils.ServicePort{sp}, klog.TODO()); err != nil {
t.Fatalf("Did not expect error when syncing backend with port %v", sp.NodePort)
}
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultZone}}); err != nil {
if err := jig.linker.Link(sp, []GroupKey{{Zone: defaultTestZone}}); err != nil {
t.Fatalf("Did not expect error when linking backend with port %v to groups", sp.NodePort)
}
if createCalls > 0 {
Expand All @@ -212,7 +212,7 @@ func TestSyncChaosMonkey(t *testing.T) {
if err != nil {
t.Fatalf("Failed to find a backend with name %v: %v", beName, err)
}
gotGroup, err := jig.fakeInstancePool.Get(defaultNamer.InstanceGroup(), defaultZone)
gotGroup, err := jig.fakeInstancePool.Get(defaultNamer.InstanceGroup(), defaultTestZone)
if err != nil {
t.Fatalf("Failed to find instance group %v", defaultNamer.InstanceGroup())
}
Expand Down
21 changes: 12 additions & 9 deletions pkg/backends/regional_ig_linker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cloud-provider-gcp/providers/gce"
"k8s.io/ingress-gce/pkg/composite"
"k8s.io/ingress-gce/pkg/instancegroups"
"k8s.io/ingress-gce/pkg/network"
"k8s.io/ingress-gce/pkg/test"
Expand Down Expand Up @@ -54,7 +53,7 @@ func newTestRegionalIgLinker(fakeGCE *gce.Cloud, backendPool *Backends, l4Namer
fakeIGs := instancegroups.NewEmptyFakeInstanceGroups()

nodeInformer := zonegetter.FakeNodeInformer()
fakeZoneGetter := zonegetter.NewZoneGetter(nodeInformer)
fakeZoneGetter := zonegetter.NewFakeZoneGetter(nodeInformer, defaultTestSubnetURL, false)
zonegetter.AddFakeNodes(fakeZoneGetter, usCentral1AZone, "test-instance1")
zonegetter.AddFakeNodes(fakeZoneGetter, "us-central1-c", "test-instance2")

Expand Down Expand Up @@ -236,13 +235,17 @@ func TestRegionalUpdateLinkWithRemovedBackends(t *testing.T) {

func createBackendService(t *testing.T, sp utils.ServicePort, backendPool *Backends) {
t.Helper()
namespacedName := types.NamespacedName{Name: "service.Name", Namespace: "service.Namespace"}
protocol := string(apiv1.ProtocolTCP)
serviceAffinityNone := string(apiv1.ServiceAffinityNone)
schemeExternal := string(cloud.SchemeExternal)
defaultNetworkInfo := network.NetworkInfo{IsDefault: true}
var noConnectionTrackingPolicy *composite.BackendServiceConnectionTrackingPolicy = nil
if _, err := backendPool.EnsureL4BackendService(sp.BackendName(), hcLink, protocol, serviceAffinityNone, schemeExternal, namespacedName, defaultNetworkInfo, noConnectionTrackingPolicy, klog.TODO()); err != nil {
backendParams := L4BackendServiceParams{
Name: sp.BackendName(),
HealthCheckLink: hcLink,
Protocol: string(apiv1.ProtocolTCP),
SessionAffinity: string(apiv1.ServiceAffinityNone),
Scheme: string(cloud.SchemeExternal),
NamespacedName: types.NamespacedName{Name: "service.Name", Namespace: "service.Namespace"},
NetworkInfo: &network.NetworkInfo{IsDefault: true},
ConnectionTrackingPolicy: nil,
}
if _, err := backendPool.EnsureL4BackendService(backendParams, klog.TODO()); err != nil {
t.Fatalf("Error creating backend service %v", err)
}
}
Loading