# Common service mesh benchmark configuration and result summary
# All times in UTC
start_time: "2019-01-01T12:00:00Z"
end_time: "2019-01-01T12:00:00Z"
# mesh details
mesh_build: "xx.xx.xx"
proxy_build: "xx.xx.xx"
# benchmark identifier
## optional: performance experiment group identifier (collect results of the same configuration under a common id)
exp_group_uuid: "uuid_string"
## individual performance experiment unique identifier
exp_uuid: "uuid_string"
# experiments use one of the standard mesh profiles
profile: "full | ingress_only | telemetry | encryption"
# location of posted results
details_uri: "https://xxx.xxx"
# environment: resource details and versions
env:
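  # Kubernetes platform and version used for the run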
  kubernetes: gke-1.10.5
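  # number of worker nodes in the cluster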
  node_count: 5
  node:
    type: "n32"
    cores: 32
    mem_mb: 4096
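# mesh configuration in effect for this run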
config:
  mesh_policy_enabled: true
  mesh_telemetry_enabled: true
  mtls_enabled: true
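  # number of proxy worker threads (0 typically means auto-size to the available cores)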
  proxy_concurrency: 0
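# load generator (client) details and offered load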
client:
  # Is the client inside or outside the mesh?
  # A client inside the mesh does not send traffic through the ingress_gateway
  internal: false
  protocol: "http | tcp | grpc"
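  # number of concurrent client connections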
  connections: 20
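  # offered load in requests per second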
  rps: 1000
  # latency distribution in ms: min, average, percentiles, max
  latencies_ms:
    min: 4
    average: 20
    p50: 22
    p90: 29
    p99: 40
    max: 45
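# per-component resource usage and traffic measured during the run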
metrics:
  ingress_gateway:
    count: 5
    # cpu in mCores
    cpu_mCores: 2010
    # memory in MB
    mem_mb: 350
    # traffic sent through the ingress gateway
    rps: 1000
    # Total bytes per second sent through the ingress gateway
    bps: 89000
  sidecars:
    count: 20
    # cpu used by all sidecars, excluding the ingress/egress gateways
    cpu_mCores: 3000
    mem_mb: 600
    # Total rps traversing all sidecars
    rps: 8000
    # Total bytes per second sent through sidecars
    bps: 213004
  mesh_telemetry:
    count: 20
    # cpu used by all mesh_telemetry pods
    cpu_mCores: 3000
    mem_mb: 600
    # Total rps traversing all proxies (sidecars+ingress+egress)
    rps: 8000
  mesh_policy:
    count: 20
    # cpu used by all mesh_policy pods
    cpu_mCores: 3000
    mem_mb: 600
    # Total rps traversing all proxies (sidecars+ingress+egress)
    rps: 8000
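    # policy check cache hit rate, in percent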
    cache_hit_rate: 99
  mesh_control_plane:
    count: 20
    # cpu used by all mesh_control_plane pods
    cpu_mCores: 3000
    mem_mb: 600
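    # scale of the mesh configuration served by the control plane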
    endpoints: 200
    services: 400 # services + service entries
    sidecars: 120
    virtual_services: 50
    destination_rules: 55
    # how long a listener (LDS) change takes to propagate to 90% of proxies
    lds_latency_ms: 1020
    # how long a cluster (CDS) change takes to propagate to 90% of proxies
    cds_latency_ms: 1020
  # additional individual workloads should be listed here
  # Only sidecar metrics are measured
  individual_workload_1:
    name: "fortioserver"
    count: 2
    cpu_mCores: 55
    mem_mb: 100