local_development.sh
#!/usr/bin/env bash
set -e
cd "$HOME/git/pipelines/"
set_env(){
export DOCKER_REGISTRY=docker.io
export DOCKER_USER=atanasovad
IMAGE=$DOCKER_REGISTRY/$DOCKER_USER/api-server
TAG=11.1.2
}
login(){
set_env
echo -e "Login: $DOCKER_REGISTRY/$DOCKER_USER"
#create the file "docker.io/pass" that contains the base64-encoded Docker Hub password (use: mkdir -p docker.io && echo "PASSWORD" | base64 > docker.io/pass)
DOCKER_PASSWORD=$(base64 --decode < docker.io/pass)
echo "$DOCKER_PASSWORD" | docker login $DOCKER_REGISTRY --username=$DOCKER_USER --password-stdin
}
build_img(){
login
docker build -t "${IMAGE}:${TAG}" -f backend/Dockerfile .
}
push_img(){
login
docker push ${IMAGE}:${TAG}
}
edit_pipeline_deployment(){
rm -f /tmp/mydeployment.yaml
kubectl get deployment.apps/ml-pipeline -o yaml -n kubeflow > /tmp/mydeployment.yaml
vi /tmp/mydeployment.yaml
kubectl apply -f /tmp/mydeployment.yaml -n kubeflow
echo "kubectl get all -n kubeflow | grep pipeli"
kubectl get all -n kubeflow | grep pipeli
}
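# A non-interactive alternative sketch to the vi-based edit above. The container name
# ml-pipeline-api-server matches the one used by hack() below; verify it in your deployment
# before relying on this:
# set_env
# kubectl -n kubeflow set image deployment/ml-pipeline ml-pipeline-api-server="${IMAGE}:${TAG}"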
print_msg() {
echo "Please use one of the available commands:
build - Build a KFP image
push - Push a KFP image to Docker Hub
edit - Edit the pipeline deployment so the newly built KFP image is used in the Kubeflow cluster
all - Build, push and edit in one step
hack - Make local code run as if in-cluster: handle certificates and tokens, and expose cluster services locally (UI 8080, Kubernetes API 8082, mysql 3306, minio 9000, visualizationserver 8889 -> 8888); needs to be rerun after each computer/cluster restart"
}
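# Example invocations (commands are processed in the order given):
#   ./local_development.sh build push edit
#   ./local_development.sh all
#   ./local_development.sh hack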
all(){
build_img
push_img
edit_pipeline_deployment
}
# Only needed in case of a full Kubeflow Pipelines installation
hack(){
# copy in-cluster service account at /var/run/secrets/kubernetes.io/serviceaccount to local dev
sudo mkdir -p /var/run/secrets/kubernetes.io/serviceaccount
POD=$(kubectl get pods -n kubeflow -l app=ml-pipeline -o jsonpath='{.items[0].metadata.name}')
kubectl exec $POD -c ml-pipeline-api-server -n kubeflow -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt > $HOME/ca.crt
kubectl exec $POD -c ml-pipeline-api-server -n kubeflow -- cat /var/run/secrets/kubernetes.io/serviceaccount/token > $HOME/token
sudo mv $HOME/ca.crt /var/run/secrets/kubernetes.io/serviceaccount
sudo mv $HOME/token /var/run/secrets/kubernetes.io/serviceaccount
# copy the in-cluster persistence agent service account token from /var/run/secrets/kubeflow/tokens/ to local dev.
# This SA token has an expiration time and becomes invalid after a certain period. Also, if the pod is deleted, a new persistence agent service account token is created.
# Better to increase the expiration time by editing `kubectl edit deploy ml-pipeline-persistenceagent -n kubeflow` -> volumes / projected / sources / serviceAccountToken / expirationSeconds to a greater number.
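# A hedged sketch of doing that non-interactively; the volume/source indexes are assumptions,
# check the ml-pipeline-persistenceagent deployment in your cluster and adjust the JSON path:
# kubectl -n kubeflow patch deploy ml-pipeline-persistenceagent --type=json \
#   -p='[{"op":"replace","path":"/spec/template/spec/volumes/0/projected/sources/0/serviceAccountToken/expirationSeconds","value":86400}]'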
# sudo mkdir -p /var/run/secrets/kubeflow/tokens/
# PA_POD=$(kubectl get pods -n kubeflow -l app=ml-pipeline-persistenceagent -o jsonpath='{.items[0].metadata.name}')
# kubectl exec -ti $PA_POD -n kubeflow -- cat /var/run/secrets/kubeflow/tokens/persistenceagent-sa-token > $HOME/persistenceagent-sa-token
# sudo mv $HOME/persistenceagent-sa-token /var/run/secrets/kubeflow/tokens/persistenceagent-sa-token
# copy samples to /samples in local dev
rm -rf $HOME/samples
sudo rm -rf /samples
kubectl cp kubeflow/$POD:/samples/ $HOME/samples/ -c ml-pipeline-api-server
sudo mv $HOME/samples /
# expose the Kubernetes API server (8082), the KFP UI via istio-ingressgateway (8080), mysql (3306), minio (9000) and the visualization server (note it will listen on 8889 locally) on localhost
kubectl proxy --port=8082 &
kubectl port-forward svc/istio-ingressgateway -n istio-system 8080:80 &
kubectl port-forward -n kubeflow svc/mysql 3306 &
kubectl port-forward -n kubeflow svc/minio-service 9000 &
kubectl port-forward -n kubeflow svc/ml-pipeline-visualizationserver 8889:8888 &
}
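# Optional connectivity check sketch (assumes nc is installed; give the background
# port-forwards above a moment to start before running it):
# sleep 3
# for p in 8080 8082 3306 9000 8889; do nc -z localhost $p && echo "port $p OK"; done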
if [[ $# -eq 0 ]] ; then
print_msg
exit 0
fi
for i in "$@"
do
case $i in
build)
echo "Build a KFP image"
build_img
;;
push)
echo "Push the KFP image"
push_img
;;
edit)
echo "Edit pipeline deployment,so the newly create KFP image to be used in Kubelow cluster"
edit_pipeline_deployment
;;
all)
echo "Build, push image and apply a KFP image"
all
;;
hack)
echo "Hack so local KFP code run as in-cluster"
hack
;;
*)
print_msg
;;
esac
done
#Enter a pod's container (when the pod has a single container)
#kubectl exec --stdin --tty <pod> -n kubeflow -- /bin/bash
#Exec individual commands in a container
#kubectl exec <pod> -- ps aux
#Enter a specific container when the pod has more than one container
#kubectl exec -i -t <pod> --container main-app -- /bin/bash
############## Run the API-Server Locally ##############
#Environment:
#KUBERNETES_SERVICE_HOST=127.0.0.1;KUBERNETES_SERVICE_PORT=8080;ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST=127.0.0.1;ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT=8889
#Package Path:
#github.com/kubeflow/pipelines/backend/src/apiserver
#Arguments:
#--config=/home/didi/git/pipelines/backend/src/apiserver/config --sampleconfig=/home/didi/git/pipelines/backend/src/apiserver/config/sample_config.json -logtostderr=true
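#A hedged sketch of launching the API server locally with the environment and flags above
#(Go toolchain assumed; paths follow the $HOME/git/pipelines checkout used at the top of this script):
#cd $HOME/git/pipelines
#KUBERNETES_SERVICE_HOST=127.0.0.1 KUBERNETES_SERVICE_PORT=8080 \
#ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST=127.0.0.1 ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT=8889 \
#go run ./backend/src/apiserver \
#  --config=$HOME/git/pipelines/backend/src/apiserver/config \
#  --sampleconfig=$HOME/git/pipelines/backend/src/apiserver/config/sample_config.json \
#  -logtostderr=true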