
Commit b3cef9e

Merge pull request #793 from apprenda/node-labels
Add node labels option in the plan file to label nodes
2 parents ec9269d + 230ba8f

26 files changed: +546 −28 lines

ansible/_label-nodes.yaml (new file, +13)

@@ -0,0 +1,13 @@
+---
+- hosts: master:worker:ingress:storage
+  any_errors_fatal: true
+  name: Label Kubernetes Nodes
+  serial: "{{ serial_count | default('100%') }}"
+  become: yes
+  vars_files:
+    - group_vars/all.yaml
+
+  tasks:
+    - name: label nodes
+      command: kubectl label --overwrite nodes --selector kismatic/host={{ inventory_hostname }} --kubeconfig {{ kubernetes_kubeconfig_path }} {{ node_labels[inventory_hostname] | join(" ") }}
+      when: node_labels[inventory_hostname] is defined and node_labels[inventory_hostname]|length > 0
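For reference, this play consumes a node_labels variable that maps each hostname to a list of key=value strings (the shape of the NodeLabels map[string][]string field added to pkg/ansible/clustercatalog.go further down). A minimal sketch of such a variable, using hypothetical hostnames and label keys:

    # Illustrative only; these hostnames and labels are not from this commit
    node_labels:
      worker-1:
        - "com.example/gpu=true"
        - "com.example/zone=us-east-1a"
      worker-2: []

    # For worker-1 the task above would render roughly:
    #   kubectl label --overwrite nodes --selector kismatic/host=worker-1 \
    #     --kubeconfig <kubeconfig path> com.example/gpu=true com.example/zone=us-east-1a
    # worker-2 is skipped by the `when` guard because its label list is empty.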

ansible/kubernetes-worker.yaml (+1)

@@ -9,6 +9,7 @@
 - include: _docker.yaml
 - include: _kubelet.yaml
 - include: _kube-proxy.yaml
+- include: _label-nodes.yaml
 - include: _cni.yaml
   when: cni.enabled|bool == true
 - include: _calico.yaml

ansible/kubernetes.yaml (+1)

@@ -24,6 +24,7 @@
 # kubelet does not have an API yet to retrieve the status of a DS pod
 # after installing kube-proxy, there is a dependency on the API server to validate the static pod
 - include: _kube-proxy.yaml
+- include: _label-nodes.yaml
 - include: _cni.yaml
   when: cni.enabled|bool == true
 - include: _calico.yaml

ansible/roles/kubelet/templates/kubelet.service (+1, −1)

@@ -21,7 +21,7 @@ ExecStart=/usr/bin/kubelet \
   --hostname-override={{ inventory_hostname }} \
   --require-kubeconfig=true \
   --kubeconfig={{ kubernetes_kubeconfig.kubelet }} \
-  --node-labels=kismatic/host={{ inventory_hostname }},kismatic/cni-provider={{ cni.provider|default("")}}{% if 'ingress' in group_names%},kismatic/ingress=true{% endif %}{% if 'storage' in group_names%},kismatic/storage=true{% endif %}{% if inventory_hostname in groups['master'] %},node-role.kubernetes.io/master={% endif %} \
+  --node-labels=kismatic/host={{ inventory_hostname }},kismatic/cni-provider={{ cni.provider|default("")}}{% if 'ingress' in group_names%},kismatic/ingress=true{% endif %}{% if 'storage' in group_names%},kismatic/storage=true{% endif %}{% if 'master' in group_names %},node-role.kubernetes.io/master={% endif %} \
   --node-ip={{ internal_ipv4 }} \
   --pod-infra-container-image={{ pause_img }} \
   --pod-manifest-path={{ kubelet_pod_manifests_dir }} \
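The fix on this line swaps the inventory_hostname in groups['master'] test for 'master' in group_names, matching the style already used by the ingress and storage checks (group_names lists the groups the current host belongs to). As an illustration, for a hypothetical host worker-1 in the worker and ingress groups with a calico CNI provider, the rendered flag would be roughly:

    --node-labels=kismatic/host=worker-1,kismatic/cni-provider=calico,kismatic/ingress=true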

ansible/roles/validate-pod/tasks/validate-pod.yaml (+1, −1)

@@ -39,7 +39,7 @@
   register: docker_logs
   when: containerID is defined and containerID|success and containerID.stdout is defined and containerID.stdout != ""

-- name: fail if pod '{{ name }}'
+- name: fail if pod '{{ name }}' is not running
   fail:
     msg: |
       Waited for pod '{{ name }}' to be running, but it did not start up in time.

ansible/upgrade-nodes.yaml (+1)

@@ -33,6 +33,7 @@
 - include: _validate-control-plane-node.yaml serial_count="1" upgrading=true
 - include: _kube-proxy-stop.yaml play_name="Upgrade Kubernetes Proxy" upgrading=true
 - include: _kube-proxy.yaml play_name="Upgrade Kubernetes Proxy" upgrading=true
+- include: _label-nodes.yaml
 - include: _cni.yaml play_name="Upgrade Kubernetes CNI" upgrading=true
   when: cni.enabled|bool == true
 - include: _calico.yaml play_name="Upgrade Calico Cluster Network" upgrading=true

cmd/gen-kismatic-ref-docs/main.go (+11, −3)

@@ -133,6 +133,13 @@ func docForType(typeName string, allTypes []*godoc.Type, parentFieldName string)
 		if isStruct(typeName) {
 			docs = append(docs, docForType(typeName, allTypes, fieldName)...)
 		}
+	case *ast.MapType:
+		typeName = fmt.Sprintf("map[%s]%s", x.Key.(*ast.Ident).Name, x.Value.(*ast.Ident).Name)
+		d, err := parseDoc(fieldName, typeName, f.Doc.Text())
+		if err != nil {
+			panic(err)
+		}
+		docs = append(docs, d)
 	default:
 		panic(fmt.Sprintf("unhandled typespec type: %q", reflect.TypeOf(x).Name()))
 	}
@@ -198,7 +205,8 @@ func isStruct(s string) bool {

 // not a comprehensive list, but works for now...
 var basicTypes = map[string]bool{
-	"bool":   true,
-	"int":    true,
-	"string": true,
+	"bool":              true,
+	"int":               true,
+	"string":            true,
+	"map[string]string": true,
 }
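Before this change, a map-typed plan field would fall through to the default case and panic with "unhandled typespec type"; the new *ast.MapType case lets the reference-doc generator document fields such as the map[string]string labels entries that appear in docs/plan-file-reference.md below.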

docs/plan-file-reference.md (+55)

@@ -76,6 +76,7 @@
     * [host](#etcdnodeshost)
     * [ip](#etcdnodesip)
     * [internalip](#etcdnodesinternalip)
+    * [labels](#etcdnodeslabels)
 * [master](#master)
   * [expected_count](#masterexpected_count)
   * [load_balanced_fqdn](#masterload_balanced_fqdn)
@@ -84,24 +85,28 @@
     * [host](#masternodeshost)
     * [ip](#masternodesip)
     * [internalip](#masternodesinternalip)
+    * [labels](#masternodeslabels)
 * [worker](#worker)
   * [expected_count](#workerexpected_count)
   * [nodes](#workernodes)
     * [host](#workernodeshost)
     * [ip](#workernodesip)
     * [internalip](#workernodesinternalip)
+    * [labels](#workernodeslabels)
 * [ingress](#ingress)
   * [expected_count](#ingressexpected_count)
   * [nodes](#ingressnodes)
     * [host](#ingressnodeshost)
     * [ip](#ingressnodesip)
     * [internalip](#ingressnodesinternalip)
+    * [labels](#ingressnodeslabels)
 * [storage](#storage)
   * [expected_count](#storageexpected_count)
   * [nodes](#storagenodes)
     * [host](#storagenodeshost)
     * [ip](#storagenodesip)
     * [internalip](#storagenodesinternalip)
+    * [labels](#storagenodeslabels)
 * [nfs](#nfs)
   * [nfs_volume](#nfsnfs_volume)
     * [nfs_host](#nfsnfs_volumenfs_host)
@@ -717,6 +722,16 @@
 | **Required** | No |
 | **Default** | ` ` |

+### etcd.nodes.labels
+
+Labels to add when installing the node in the cluster. If a node is defined under multiple roles, the labels for that node are merged. If the same label key is set under more than one role, the roles are applied in the order etcd, master, worker, ingress, storage, so the value from the 'storage' role has the highest precedence. Reverse-DNS notation is recommended to avoid collisions with other labels.
+
+| | |
+|----------|-----------------|
+| **Kind** | map[string]string |
+| **Required** | No |
+| **Default** | ` ` |
+
 ## master

 Master nodes of the cluster
@@ -785,6 +800,16 @@
 | **Required** | No |
 | **Default** | ` ` |

+### master.nodes.labels
+
+Labels to add when installing the node in the cluster. If a node is defined under multiple roles, the labels for that node are merged. If the same label key is set under more than one role, the roles are applied in the order etcd, master, worker, ingress, storage, so the value from the 'storage' role has the highest precedence. Reverse-DNS notation is recommended to avoid collisions with other labels.
+
+| | |
+|----------|-----------------|
+| **Kind** | map[string]string |
+| **Required** | No |
+| **Default** | ` ` |
+
 ## worker

 Worker nodes of the cluster
@@ -833,6 +858,16 @@
 | **Required** | No |
 | **Default** | ` ` |

+### worker.nodes.labels
+
+Labels to add when installing the node in the cluster. If a node is defined under multiple roles, the labels for that node are merged. If the same label key is set under more than one role, the roles are applied in the order etcd, master, worker, ingress, storage, so the value from the 'storage' role has the highest precedence. Reverse-DNS notation is recommended to avoid collisions with other labels.
+
+| | |
+|----------|-----------------|
+| **Kind** | map[string]string |
+| **Required** | No |
+| **Default** | ` ` |
+
 ## ingress

 Ingress nodes of the cluster
@@ -881,6 +916,16 @@
 | **Required** | No |
 | **Default** | ` ` |

+### ingress.nodes.labels
+
+Labels to add when installing the node in the cluster. If a node is defined under multiple roles, the labels for that node are merged. If the same label key is set under more than one role, the roles are applied in the order etcd, master, worker, ingress, storage, so the value from the 'storage' role has the highest precedence. Reverse-DNS notation is recommended to avoid collisions with other labels.
+
+| | |
+|----------|-----------------|
+| **Kind** | map[string]string |
+| **Required** | No |
+| **Default** | ` ` |
+
 ## storage

 Storage nodes of the cluster.
@@ -929,6 +974,16 @@
 | **Required** | No |
 | **Default** | ` ` |

+### storage.nodes.labels
+
+Labels to add when installing the node in the cluster. If a node is defined under multiple roles, the labels for that node are merged. If the same label key is set under more than one role, the roles are applied in the order etcd, master, worker, ingress, storage, so the value from the 'storage' role has the highest precedence. Reverse-DNS notation is recommended to avoid collisions with other labels.
+
+| | |
+|----------|-----------------|
+| **Kind** | map[string]string |
+| **Required** | No |
+| **Default** | ` ` |
+
 ## nfs

 NFS volumes of the cluster.
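To make the merge rule concrete, here is a sketch of a plan-file fragment under the semantics described above; the hostname and label keys are hypothetical. The node appears under both worker and storage, so its labels are merged and the storage value wins for the repeated key:

    worker:
      nodes:
      - host: node1.example.com
        ip: 10.0.0.10
        labels:
          com.example/tier: silver   # overridden below
    storage:
      nodes:
      - host: node1.example.com
        ip: 10.0.0.10
        labels:
          com.example/tier: gold     # storage has the highest precedence, so 'gold' is applied
          com.example/disk: ssd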

integration/add_worker.go (+4, −1)

@@ -9,9 +9,12 @@ import (
 	. "github.com/onsi/ginkgo"
 )

-func addWorkerToCluster(newWorker NodeDeets, sshKey string) error {
+func addWorkerToCluster(newWorker NodeDeets, sshKey string, labels []string) error {
 	By("Adding new worker")
 	cmd := exec.Command("./kismatic", "install", "add-worker", "-f", "kismatic-testing.yaml", newWorker.Hostname, newWorker.PublicIP, newWorker.PrivateIP)
+	if len(labels) > 0 {
+		cmd.Args = append(cmd.Args, "--labels", strings.Join(labels, ","))
+	}
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	if err := cmd.Run(); err != nil {
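With a non-empty slice, the helper invokes, for example, ./kismatic install add-worker -f kismatic-testing.yaml <host> <ip> <internal-ip> --labels com.integrationtest/worker=true, passing multiple labels as a single comma-separated --labels value.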

integration/hosts_file_test.go (+1, −1)

@@ -51,7 +51,7 @@ var _ = Describe("hosts file modification feature", func() {
 		FailIfError(err)

 		By("Adding a worker with a bogus hostname that is added to hosts files")
-		err = addWorkerToCluster(nodes.worker[3], sshKey)
+		err = addWorkerToCluster(nodes.worker[3], sshKey, []string{})
 		FailIfError(err)
 	})
 })

integration/install_test.go (+11, −3)

@@ -234,7 +234,7 @@ var _ = Describe("kismatic", func() {

 			sub.It("should allow adding a worker node", func() error {
 				newWorker := allWorkers[len(allWorkers)-1]
-				return addWorkerToCluster(newWorker, sshKey)
+				return addWorkerToCluster(newWorker, sshKey, []string{"com.integrationtest/worker=true"})
 			})

 			sub.It("should be able to deploy a workload with ingress", func() error {
@@ -257,6 +257,10 @@
 			sub.It("should have tiller running", func() error {
 				return verifyTiller(nodes.master[0], sshKey)
 			})
+
+			sub.It("nodes should contain expected labels", func() error {
+				return containsLabels(nodes, sshKey)
+			})
 		})
 	})
 })
@@ -284,7 +288,7 @@

 			sub.It("should allow adding a worker node", func() error {
 				newWorker := allWorkers[len(allWorkers)-1]
-				return addWorkerToCluster(newWorker, sshKey)
+				return addWorkerToCluster(newWorker, sshKey, []string{"com.integrationtest/worker=true"})
 			})

 			sub.It("should be able to deploy a workload with ingress", func() error {
@@ -307,6 +311,10 @@
 			sub.It("should have tiller running", func() error {
 				return verifyTiller(nodes.master[0], sshKey)
 			})
+
+			sub.It("nodes should contain expected labels", func() error {
+				return containsLabels(nodes, sshKey)
+			})
 		})
 	})
 })
@@ -334,7 +342,7 @@

 			// sub.It("should allow adding a worker node", func() error {
 			// 	newWorker := allWorkers[len(allWorkers)-1]
-			// 	return addWorkerToCluster(newWorker, sshKey)
+			// 	return addWorkerToCluster(newWorker, sshKey, []string{})
 			// })

 			// // This test is flaky with contiv

integration/labels.go (new file, +40)

@@ -0,0 +1,40 @@
+package integration
+
+import (
+	"fmt"
+	"time"
+)
+
+func containsLabels(nodes provisionedNodes, sshKey string) error {
+	// labels are hardcoded in the plan pattern
+	tests := []struct {
+		nodes []NodeDeets
+		label string
+	}{
+		{
+			nodes: nodes.master,
+			label: "com.integrationtest/master:true",
+		},
+		{
+			nodes: nodes.worker,
+			label: "com.integrationtest/worker:true",
+		},
+		{
+			nodes: nodes.ingress,
+			label: "com.integrationtest/ingress:true",
+		},
+		{
+			nodes: nodes.storage,
+			label: "com.integrationtest/storage:true",
+		},
+	}
+	for _, role := range tests {
+		for _, n := range role.nodes {
+			if err := runViaSSH([]string{fmt.Sprintf("sudo kubectl get nodes %s -o jsonpath='{.metadata.labels}' | grep %q", n.Hostname, role.label)}, []NodeDeets{nodes.master[0]}, sshKey, 1*time.Minute); err != nil {
+				return fmt.Errorf("error validating node %q label: %v", n.Hostname, err)
+			}
+		}
+	}
+
+	return nil
+}
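Note that the expected labels here use ':' rather than '=': the check greps the jsonpath rendering of the node's labels map, where entries print as key:value pairs (for example, output resembling map[com.integrationtest/worker:true kismatic/host:worker-1 ...]; the exact line is illustrative).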

integration/plan_patterns.go (+12, −4)

@@ -108,27 +108,35 @@ master:
   nodes:{{range .Master}}
   - host: {{.Hostname}}
     ip: {{.PublicIP}}
-    internalip: {{.PrivateIP}}{{end}}
+    internalip: {{.PrivateIP}}
+    labels:
+      com.integrationtest/master: true{{end}}
   load_balanced_fqdn: {{.MasterNodeFQDN}}
   load_balanced_short_name: {{.MasterNodeShortName}}
 worker:
   expected_count: {{len .Worker}}
   nodes:{{range .Worker}}
   - host: {{.Hostname}}
     ip: {{.PublicIP}}
-    internalip: {{.PrivateIP}}{{end}}
+    internalip: {{.PrivateIP}}
+    labels:
+      com.integrationtest/worker: true{{end}}
 ingress:
   expected_count: {{len .Ingress}}
   nodes:{{range .Ingress}}
   - host: {{.Hostname}}
     ip: {{.PublicIP}}
-    internalip: {{.PrivateIP}}{{end}}
+    internalip: {{.PrivateIP}}
+    labels:
+      com.integrationtest/ingress: true{{end}}
 storage:
   expected_count: {{len .Storage}}
   nodes:{{range .Storage}}
   - host: {{.Hostname}}
     ip: {{.PublicIP}}
-    internalip: {{.PrivateIP}}{{end}}
+    internalip: {{.PrivateIP}}
+    labels:
+      com.integrationtest/storage: true{{end}}
 nfs:
   nfs_volume:{{range .NFSVolume}}
   - nfs_host: {{.Host}}

integration/upgrade_test.go (+2, −2)

@@ -64,7 +64,7 @@ var _ = Describe("Upgrade", func() {

 			sub.It("should allow adding a worker node", func() error {
 				newWorker := allWorkers[len(allWorkers)-1]
-				return addWorkerToCluster(newWorker, sshKey)
+				return addWorkerToCluster(newWorker, sshKey, []string{})
 			})

 			sub.It("should be able to deploy a workload with ingress", func() error {
@@ -248,7 +248,7 @@

 			sub.It("should allow adding a worker node", func() error {
 				newWorker := allWorkers[len(allWorkers)-1]
-				return addWorkerToCluster(newWorker, sshKey)
+				return addWorkerToCluster(newWorker, sshKey, []string{})
 			})

 			sub.It("should be able to deploy a workload with ingress", func() error {

pkg/ansible/clustercatalog.go (+2)

@@ -121,6 +121,8 @@ type ClusterCatalog struct {
 	HTTPProxy  string `yaml:"http_proxy"`
 	HTTPSProxy string `yaml:"https_proxy"`
 	NoProxy    string `yaml:"no_proxy"`
+
+	NodeLabels map[string][]string `yaml:"node_labels"`
 }

 type NFSVolume struct {
