
Commit 30fa093

Upgrade to k8s v1.8.4
1 parent a4de916 commit 30fa093

File tree

9 files changed: +288 -64 lines changed

Makefile (+1 -1)

@@ -18,7 +18,7 @@ ANSIBLE_VERSION = 2.3.0.0
 PROVISIONER_VERSION = v1.6.2
 KUBERANG_VERSION = v1.2.2
 GO_VERSION = 1.8.4
-KUBECTL_VERSION = v1.8.3
+KUBECTL_VERSION = v1.8.4
 HELM_VERSION = v2.7.0

 ifeq ($(origin GLIDE_GOOS), undefined)
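This hunk only bumps the pinned KUBECTL_VERSION; the Makefile targets that consume it are outside the diff. As a hedged sketch, a kubectl binary of that era is typically fetched from the official Kubernetes release bucket, so the pin corresponds to a download along these lines (illustrative URL and install path, not taken from this repo):

  # illustrative: fetch and check the pinned kubectl release binary
  curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.4/bin/linux/amd64/kubectl
  chmod +x kubectl
  ./kubectl version --client   # should report v1.8.4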

README.md (+1 -1)

@@ -26,7 +26,7 @@ KET operational tools include:
 ## Components
 | Component | Version |
 | --- | --- |
-| Kubernetes | v1.8.3 |
+| Kubernetes | v1.8.4 |
 | Docker | v1.12.6 |
 | Etcd (for Kubernetes) | v3.1.10 |
 | Etcd (for Calico & Contiv) | v3.1.10 |

ansible/group_vars/all.yaml (+2 -2)

@@ -1,7 +1,7 @@
 #===============================================================================
 # VERSIONS
-kubernetes_yum_version: 1.8.3-0
-kubernetes_deb_version: 1.8.3-00
+kubernetes_yum_version: 1.8.4-0
+kubernetes_deb_version: 1.8.4-00
 docker_engine_yum_version: 1.12.6-1.el7.centos
 docker_engine_apt_version: 1.12.6-0~ubuntu-xenial
 glusterfs_server_version_rhel: "3.8.15-2.el7"

ansible/group_vars/container_images.yaml (+4 -4)

@@ -4,16 +4,16 @@ official_images:
     version: v3.1.10
   kube_proxy:
     name: gcr.io/google-containers/kube-proxy-amd64
-    version: v1.8.3
+    version: v1.8.4
   kube_controller_manager:
     name: gcr.io/google-containers/kube-controller-manager-amd64
-    version: v1.8.3
+    version: v1.8.4
   kube_scheduler:
     name: gcr.io/google-containers/kube-scheduler-amd64
-    version: v1.8.3
+    version: v1.8.4
   kube_apiserver:
     name: gcr.io/google-containers/kube-apiserver-amd64
-    version: v1.8.3
+    version: v1.8.4
   calico_node:
     name: calico/node
     version: v2.6.2
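container_images.yaml only records image names and tags. As a quick, hypothetical sanity check (not part of this commit), the bumped tags can be pulled directly from the registry named in the file:

  docker pull gcr.io/google-containers/kube-proxy-amd64:v1.8.4
  docker pull gcr.io/google-containers/kube-apiserver-amd64:v1.8.4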

docs/packages.md (+2 -2)

@@ -39,7 +39,7 @@ EOF'
 | Component | Install Command |
 | ---- | ---- |
 | Etcd Node | `sudo yum -y install docker-engine-1.12.6-1.el7.centos` |
-| Kubernetes Node | `sudo yum -y install docker-engine-1.12.6-1.el7.centos nfs-utils kubelet-1.8.3-0 kubectl-1.8.3-0` |
+| Kubernetes Node | `sudo yum -y install docker-engine-1.12.6-1.el7.centos nfs-utils kubelet-1.8.4-0 kubectl-1.8.4-0` |

 ## Installing via DEB (Ubuntu Xenial)

@@ -90,7 +90,7 @@ sudo apt-get update
 | Component | Install Command |
 | ---- | ---- |
 | Etcd Node | `sudo apt-get install -y docker-engine=1.12.6-0~ubuntu-xenial` |
-| Kubernetes Node | `sudo apt-get install -y docker-engine=1.12.6-0~ubuntu-xenial nfs-common kubelet=1.8.3-00 kubectl=1.8.3-00` |
+| Kubernetes Node | `sudo apt-get install -y docker-engine=1.12.6-0~ubuntu-xenial nfs-common kubelet=1.8.4-00 kubectl=1.8.4-00` |

 #### Stop the kubelet
 When the Ubuntu kubelet package is installed, the service is started and binds to ports, which causes some preflight port checks to fail.
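That note is also why integration/prepare.go (below) defines stopKubeletService: the Ubuntu package starts the kubelet on install, and the ports it binds would otherwise trip the preflight checks. A minimal sketch of the remedy, using the same command as the test constant (the ss check is only an illustrative verification, not from the docs):

  sudo systemctl stop kubelet
  sudo ss -tlnp | grep kubelet   # should print nothing once the service is stopped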

docs/provision.md (+2 -2)

@@ -123,14 +123,14 @@ If you are building a large cluster or one that won't have access to these repos
     <td></td>
   </tr>
   <tr>
-    <td>Kismatic package of Kubernetes kubelet 1.8.3</td>
+    <td>Kismatic package of Kubernetes kubelet 1.8.4</td>
     <td>Kubernetes</td>
     <td></td>
     <td>yes</td>
     <td>yes</td>
   </tr>
   <tr>
-    <td>Kismatic package of Kubernetes kubectl 1.8.3</td>
+    <td>Kismatic package of Kubernetes kubectl 1.8.4</td>
     <td>Kubernetes</td>
     <td></td>
     <td>yes</td>

integration/prepare.go (+4 -4)

@@ -52,9 +52,9 @@ EOF`
 	moveGlusterRepoFileYum = `sudo mv /tmp/gluster.repo /etc/yum.repos.d`

 	installDockerYum = `sudo yum -y install docker-engine-1.12.6-1.el7.centos`
-	installKubeletYum = `sudo yum -y install kubelet-1.8.3-0`
+	installKubeletYum = `sudo yum -y install kubelet-1.8.4-0`
 	installNFSUtilsYum = `sudo yum -y install nfs-utils` // required for the kubelet
-	installKubectlYum = `sudo yum -y install kubectl-1.8.3-0`
+	installKubectlYum = `sudo yum -y install kubectl-1.8.4-0`
 	installGlusterfsServerYum = `sudo yum -y install --nogpgcheck glusterfs-server-3.8.15-2.el7`

 	updateAptGet = `sudo apt-get update`

@@ -64,10 +64,10 @@ EOF`

 	addKubernetesRepoKeyApt = `wget -qO - https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -`
 	addKubernetesRepoApt = `sudo add-apt-repository "deb https://packages.cloud.google.com/apt/ kubernetes-xenial main"`
-	installKubeletApt = `sudo apt-get -y install kubelet=1.8.3-00`
+	installKubeletApt = `sudo apt-get -y install kubelet=1.8.4-00`
 	stopKubeletService = `sudo systemctl stop kubelet`
 	installNFSCommonApt = `sudo apt-get -y install nfs-common`
-	installKubectlApt = `sudo apt-get -y install kubectl=1.8.3-00`
+	installKubectlApt = `sudo apt-get -y install kubectl=1.8.4-00`

 	addGlusterRepoApt = `sudo add-apt-repository -y ppa:gluster/glusterfs-3.8`
 	installGlusterfsServerApt = `sudo apt-get -y install glusterfs-server=3.8.15-ubuntu1~xenial1`

integration/upgrade_test.go (+224)

@@ -12,6 +12,230 @@ import (
 var _ = Describe("Upgrade", func() {

 	Describe("Upgrading a cluster using online mode", func() {
+		Context("From KET version v1.6.2", func() {
+			BeforeEach(func() {
+				dir := setupTestWorkingDirWithVersion("v1.6.2")
+				os.Chdir(dir)
+			})
+
+			Context("Using a minikube layout", func() {
+				Context("Using CentOS 7", func() {
+					ItOnAWS("should be upgraded [slow] [upgrade]", func(aws infrastructureProvisioner) {
+						WithMiniInfrastructure(CentOS7, aws, func(node NodeDeets, sshKey string) {
+							installAndUpgradeMinikube(node, sshKey, true)
+						})
+					})
+				})
+
+				Context("Using RedHat 7", func() {
+					ItOnAWS("should be upgraded [slow] [upgrade]", func(aws infrastructureProvisioner) {
+						WithMiniInfrastructure(RedHat7, aws, func(node NodeDeets, sshKey string) {
+							installAndUpgradeMinikube(node, sshKey, true)
+						})
+					})
+				})
+			})
+
+			// This spec will be used for testing non-destructive kismatic features on
+			// an upgraded cluster.
+			// This spec is open to modification when new assertions have to be made.
+			Context("Using a skunkworks cluster", func() {
+				ItOnAWS("should result in an upgraded cluster [slow] [upgrade]", func(aws infrastructureProvisioner) {
+					WithInfrastructureAndDNS(NodeCount{Etcd: 3, Master: 2, Worker: 3, Ingress: 2, Storage: 2}, Ubuntu1604LTS, aws, func(nodes provisionedNodes, sshKey string) {
+						// reserve one of the workers for the add-worker test
+						allWorkers := nodes.worker
+						nodes.worker = allWorkers[0 : len(nodes.worker)-1]
+
+						// Standup cluster with previous version
+						opts := installOptions{}
+						err := installKismatic(nodes, opts, sshKey)
+						FailIfError(err)
+
+						// Extract current version of kismatic
+						extractCurrentKismaticInstaller()
+
+						// Perform upgrade
+						upgradeCluster(true)
+
+						sub := SubDescribe("Using an upgraded cluster")
+						defer sub.Check()
+
+						sub.It("should have working storage volumes", func() error {
+							return testStatefulWorkload(nodes, sshKey)
+						})
+
+						sub.It("should allow adding a worker node", func() error {
+							newWorker := allWorkers[len(allWorkers)-1]
+							return addWorkerToCluster(newWorker, sshKey, []string{})
+						})
+
+						sub.It("should be able to deploy a workload with ingress", func() error {
+							return verifyIngressNodes(nodes.master[0], nodes.ingress, sshKey)
+						})
+
+						// Use master[0] public IP
+						sub.It("should have an accessible dashboard", func() error {
+							return canAccessDashboard(fmt.Sprintf("https://admin:abbazabba@%s:6443/ui", nodes.master[0].PublicIP))
+						})
+
+						sub.It("should respect network policies", func() error {
+							return verifyNetworkPolicy(nodes.master[0], sshKey, false)
+						})
+
+						// This test should always be last
+						sub.It("should still be a highly available cluster after upgrade", func() error {
+							By("Removing a Kubernetes master node")
+							if err = aws.TerminateNode(nodes.master[0]); err != nil {
+								return fmt.Errorf("could not remove node: %v", err)
+							}
+							By("Re-running Kuberang")
+							if err = runViaSSH([]string{"sudo kuberang"}, []NodeDeets{nodes.master[1]}, sshKey, 5*time.Minute); err != nil {
+								return err
+							}
+							return nil
+						})
+					})
+				})
+			})
+
+			Context("Using a cluster that has no internet access [slow] [upgrade]", func() {
+				Context("With nodes running CentOS 7", func() {
+					ItOnAWS("should result in an upgraded cluster", func(aws infrastructureProvisioner) {
+						distro := CentOS7
+						WithInfrastructure(NodeCount{Etcd: 1, Master: 1, Worker: 2, Ingress: 1, Storage: 1}, distro, aws, func(nodes provisionedNodes, sshKey string) {
+							// One of the nodes will function as a repo mirror and image registry
+							repoNode := nodes.worker[1]
+							nodes.worker = nodes.worker[0:1]
+							// Standup cluster with previous version
+							opts := installOptions{
+								disconnectedInstallation: false, // we want KET to install the packages, so let it use the package repo
+								modifyHostsFiles:         true,
+							}
+							err := installKismatic(nodes, opts, sshKey)
+							FailIfError(err)
+
+							// Extract current version of kismatic
+							extractCurrentKismaticInstaller()
+
+							By("Creating a package repository")
+							err = createPackageRepositoryMirror(repoNode, distro, sshKey)
+							FailIfError(err, "Error creating local package repo")
+
+							By("Deploying a docker registry")
+							caFile, err := deployAuthenticatedDockerRegistry(repoNode, dockerRegistryPort, sshKey)
+							FailIfError(err, "Failed to deploy docker registry")
+
+							By("Seeding the local registry")
+							err = seedRegistry(repoNode, caFile, dockerRegistryPort, sshKey)
+							FailIfError(err, "Error seeding local registry")
+
+							err = disableInternetAccess(nodes.allNodes(), sshKey)
+							FailIfError(err)
+
+							By("Configuring repository on nodes")
+							for _, n := range nodes.allNodes() {
+								err = copyFileToRemote("test-resources/disconnected-installation/configure-rpm-mirrors.sh", "/tmp/configure-rpm-mirrors.sh", n, sshKey, 15*time.Second)
+								FailIfError(err, "Failed to copy script to nodes")
+							}
+							cmds := []string{
+								"chmod +x /tmp/configure-rpm-mirrors.sh",
+								fmt.Sprintf("sudo /tmp/configure-rpm-mirrors.sh http://%s", repoNode.PrivateIP),
+							}
+							err = runViaSSH(cmds, nodes.allNodes(), sshKey, 5*time.Minute)
+							FailIfError(err, "Failed to run mirror configuration script")
+
+							if err := verifyNoInternetAccess(nodes.allNodes(), sshKey); err == nil {
+								Fail("was able to ping google with outgoing connections blocked")
+							}
+
+							// Cleanup old cluster file and create a new one
+							By("Recreating kismatic-testing.yaml file")
+							err = os.Remove("kismatic-testing.yaml")
+							FailIfError(err)
+							opts = installOptions{
+								disconnectedInstallation: true,
+								modifyHostsFiles:         true,
+								dockerRegistryCAPath:     caFile,
+								dockerRegistryServer:     fmt.Sprintf("%s:%d", repoNode.PrivateIP, dockerRegistryPort),
+								dockerRegistryUsername:   "kismaticuser",
+								dockerRegistryPassword:   "kismaticpassword",
+							}
+							writePlanFile(buildPlan(nodes, opts, sshKey))
+
+							upgradeCluster(true)
+						})
+					})
+				})
+
+				Context("With nodes running Ubuntu 16.04", func() {
+					ItOnAWS("should result in an upgraded cluster", func(aws infrastructureProvisioner) {
+						distro := Ubuntu1604LTS
+						WithInfrastructure(NodeCount{Etcd: 1, Master: 1, Worker: 2, Ingress: 1, Storage: 1}, distro, aws, func(nodes provisionedNodes, sshKey string) {
+							// One of the nodes will function as a repo mirror and image registry
+							repoNode := nodes.worker[1]
+							nodes.worker = nodes.worker[0:1]
+							// Standup cluster with previous version
+							opts := installOptions{
+								disconnectedInstallation: false, // we want KET to install the packages, so let it use the package repo
+								modifyHostsFiles:         true,
+							}
+							err := installKismatic(nodes, opts, sshKey)
+							FailIfError(err)
+
+							extractCurrentKismaticInstaller()
+
+							By("Creating a package repository")
+							err = createPackageRepositoryMirror(repoNode, distro, sshKey)
+							FailIfError(err, "Error creating local package repo")
+
+							By("Deploying a docker registry")
+							caFile, err := deployAuthenticatedDockerRegistry(repoNode, dockerRegistryPort, sshKey)
+							FailIfError(err, "Failed to deploy docker registry")
+
+							By("Seeding the local registry")
+							err = seedRegistry(repoNode, caFile, dockerRegistryPort, sshKey)
+							FailIfError(err, "Error seeding local registry")
+
+							err = disableInternetAccess(nodes.allNodes(), sshKey)
+							FailIfError(err)
+
+							By("Configuring repository on nodes")
+							for _, n := range nodes.allNodes() {
+								err = copyFileToRemote("test-resources/disconnected-installation/configure-deb-mirrors.sh", "/tmp/configure-deb-mirrors.sh", n, sshKey, 15*time.Second)
+								FailIfError(err, "Failed to copy script to nodes")
+							}
+							cmds := []string{
+								"chmod +x /tmp/configure-deb-mirrors.sh",
+								fmt.Sprintf("sudo /tmp/configure-deb-mirrors.sh http://%s", repoNode.PrivateIP),
+							}
+							err = runViaSSH(cmds, nodes.allNodes(), sshKey, 5*time.Minute)
+							FailIfError(err, "Failed to run mirror configuration script")
+
+							if err := verifyNoInternetAccess(nodes.allNodes(), sshKey); err == nil {
+								Fail("was able to ping google with outgoing connections blocked")
+							}
+
+							// Cleanup old cluster file and create a new one
+							By("Recreating kismatic-testing.yaml file")
+							err = os.Remove("kismatic-testing.yaml")
+							FailIfError(err)
+							opts = installOptions{
+								disconnectedInstallation: true,
+								modifyHostsFiles:         true,
+								dockerRegistryCAPath:     caFile,
+								dockerRegistryServer:     fmt.Sprintf("%s:%d", repoNode.PrivateIP, dockerRegistryPort),
+								dockerRegistryUsername:   "kismaticuser",
+								dockerRegistryPassword:   "kismaticpassword",
+							}
+							writePlanFile(buildPlan(nodes, opts, sshKey))
+
+							upgradeCluster(true)
+						})
+					})
+				})
+			})
+		})
+
 		Context("From KET version v1.6.1", func() {
 			BeforeEach(func() {
 				dir := setupTestWorkingDirWithVersion("v1.6.1")

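The new specs are tagged [slow] [upgrade] and are written with Ginkgo-style Describe/Context blocks plus the ItOnAWS helper, so they only exercise provisioned AWS infrastructure. Assuming the standard Ginkgo CLI drives the integration package (the repo's actual test entry point is not shown in this diff), a focused run might look like:

  ginkgo -focus "\[upgrade\]" ./integration   # hypothetical invocation; AWS credentials and SSH keys expected by ItOnAWS must be configured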