Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a test that can trigger a map race condition. #192

Draft
wants to merge 1 commit into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 16 additions & 2 deletions xds/snapshotter.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,10 @@ type Snapshotter struct {

// Node keeps the info for a node
// Node keeps the info for a node
type Node struct {
// NOTE(review): the two lines below are the PRE-change fields as rendered
// by the diff view; the four lines after them are the replacement. A real
// source file contains only one set.
address string
resources *NodeSnapshotResources
address string
resources *NodeSnapshotResources
// serviceMu guards the service maps reached through resources
// (services / servicesNames) against concurrent update and snapshot.
serviceMu sync.Mutex
// endpointsMu guards the endpoint maps reached through resources
// (endpoints / endpointsNames) against concurrent update and snapshot.
// NOTE(review): elsewhere in this diff Node values are round-tripped
// through a sync.Map as values (`n.(Node)` / `s.nodes.Store(nodeID, node)`),
// which copies the struct and therefore the mutexes (go vet copylocks).
// The locks still serialize access only because every copy shares the
// same *NodeSnapshotResources pointer guarded here — consider storing
// *Node in the sync.Map instead; TODO confirm.
endpointsMu sync.Mutex
}

// NodeSnapshot keeps resources and versions to help snapshotting per node
Expand Down Expand Up @@ -400,8 +402,10 @@ func (s *Snapshotter) updateNodeServiceSnapshotResources(nodeID, typeURL string,
if err != nil {
return fmt.Errorf("Cannot get resources from cache: %s", err)
}
node.serviceMu.Lock()
node.resources.services[typeURL] = newSnapResources
node.resources.servicesNames[typeURL] = resources
node.serviceMu.Unlock()
s.nodes.Store(nodeID, node)
return nil
}
Expand All @@ -418,8 +422,10 @@ func (s *Snapshotter) updateNodeEndpointsSnapshotResources(nodeID, typeURL strin
if err != nil {
return fmt.Errorf("Cannot get resources from cache: %s", err)
}
node.endpointsMu.Lock()
node.resources.endpoints[typeURL] = newSnapResources
node.resources.endpointsNames[typeURL] = resources
node.endpointsMu.Unlock()
s.nodes.Store(nodeID, node)
return nil
}
Expand All @@ -433,6 +439,10 @@ func (s *Snapshotter) nodeServiceSnapshot(nodeID string) error {
}
node := n.(Node)
atomic.AddInt32(&node.resources.serviceSnapVersion, 1)
// Node snapshot is going to range over node resources, thus we need to lock in case we are
// snapshotting at the same time.
node.serviceMu.Lock()
defer node.serviceMu.Unlock()
snapshot, err := cache.NewSnapshot(fmt.Sprint(node.resources.serviceSnapVersion), node.resources.services)
if err != nil {
return err
Expand All @@ -449,6 +459,10 @@ func (s *Snapshotter) nodeEndpointsSnapshot(nodeID string) error {
}
node := n.(Node)
atomic.AddInt32(&node.resources.endpointsSnapVersion, 1)
// Node snapshot is going to range over node resources, thus we need to lock in case we are
// snapshotting at the same time.
node.endpointsMu.Lock()
defer node.endpointsMu.Unlock()
snapshot, err := cache.NewSnapshot(fmt.Sprint(node.resources.endpointsSnapVersion), node.resources.endpoints)
if err != nil {
return err
Expand Down
87 changes: 87 additions & 0 deletions xds/snapshotter_test.go
Original file line number Diff line number Diff line change
@@ -1,12 +1,16 @@
package xds

import (
"sync"
"testing"

clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/stretchr/testify/assert"
"github.com/utilitywarehouse/semaphore-xds/log"
"golang.org/x/time/rate"
v1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -362,3 +366,86 @@ func TestSnapServices_NodeSnapshotResources(t *testing.T) {
}
assert.Equal(t, 0, len(snap.GetResources(resource.ListenerType)))
}

// TestOnStreamRequest fires OnStreamRequest calls for all resource types
// concurrently with SnapServices, so that running under `go test -race`
// can surface concurrent map access on the per-node snapshot resources.
// It then verifies the resulting node snapshot contains exactly one
// listener, route and cluster.
func TestOnStreamRequest(t *testing.T) {
	snapshotter := NewSnapshotter(uint(0), float64(0), float64(0))
	serviceStore := NewXdsServiceStore()
	streamID := int64(1)
	peerAddr := "10.0.0.1"
	snapshotter.streams.Store(streamID, Stream{
		peerAddress:      peerAddr,
		requestRateLimit: rate.NewLimiter(rate.Limit(snapshotter.streamRequestPerSecond), 1),
	})
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo",
			Namespace: "bar",
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name: "test",
				Port: int32(80),
			}},
		},
	}
	serviceStore.AddOrUpdate(svc, Service{
		Policy:                   clusterv3.Cluster_ROUND_ROBIN,
		EnableRemoteEndpoints:    false,
		PrioritizeLocalEndpoints: false,
	})
	snapshotter.SnapServices(serviceStore)

	// Send OnStreamRequests for all resource types while triggering
	// snapshots; with -race this should flag any concurrent map accesses.
	var wg sync.WaitGroup
	// sendRequest issues one OnStreamRequest for the given type URL and
	// resource name. It reports failures with t.Error rather than t.Fatal:
	// T.FailNow (called by Fatal) must not be used from any goroutine
	// other than the one running the test function.
	sendRequest := func(typeURL, resourceName string) {
		defer wg.Done()
		if err := snapshotter.OnStreamRequest(streamID, &discovery.DiscoveryRequest{
			TypeUrl:       typeURL,
			ResourceNames: []string{resourceName},
			Node:          &corev3.Node{Id: "test"},
		}); err != nil {
			t.Error(err)
		}
	}
	wg.Add(3)
	go snapshotter.SnapServices(serviceStore)
	go sendRequest(resource.ListenerType, "foo.bar:80")
	go snapshotter.SnapServices(serviceStore)
	// Cluster names use dots instead of colons ("foo.bar.80").
	go sendRequest(resource.ClusterType, "foo.bar.80")
	go snapshotter.SnapServices(serviceStore)
	go sendRequest(resource.RouteType, "foo.bar:80")
	go snapshotter.SnapServices(serviceStore)
	wg.Wait()
	// Get "test" node snapshot and verify that the Listener resource is included
	snap, err := snapshotter.servicesCache.GetSnapshot("test")
	if err != nil {
		t.Fatal(err)
	}
	// Verify that our snapshot will have a single listener, route and
	// cluster
	assert.Equal(t, 1, len(snap.GetResources(resource.ListenerType)))
	assert.Equal(t, 1, len(snap.GetResources(resource.ClusterType)))
	assert.Equal(t, 1, len(snap.GetResources(resource.RouteType)))
}
Loading