Skip to content

Commit 2e1c2bc

Browse files
authored
*: remove cluster-scoped flag for operator-sdk new (operator-framework#1434)
1 parent a826ef8 commit 2e1c2bc

File tree

12 files changed

+48
-137
lines changed

12 files changed

+48
-137
lines changed

CHANGELOG.md

+1
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626

2727
- The SDK will no longer run `defaulter-gen` when running `operator-sdk generate k8s`. Defaulting for CRDs should be handled with mutating admission webhooks. ([#1288](https://github.com/operator-framework/operator-sdk/pull/1288))
2828
- **Breaking Change**: The `test cluster` subcommand and the corresponding `--enable-tests` flag for the `build` subcommand have been removed ([#1414](https://github.com/operator-framework/operator-sdk/pull/1414))
29+
- **Breaking Change**: The `--cluster-scoped` flag for `operator-sdk new` has been removed so it won't scaffold a cluster-scoped operator. Read the [operator scope](https://github.com/operator-framework/operator-sdk/blob/master/doc/operator-scope.md) documentation on the changes needed to run a cluster-scoped operator. ([#1434](https://github.com/operator-framework/operator-sdk/pull/1434))
2930

3031
### Bug Fixes
3132

cmd/operator-sdk/new/cmd.go

+9-11
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,6 @@ generates a skeletal app-operator application in $GOPATH/src/github.com/example.
5858
newCmd.Flags().BoolVar(&skipGit, "skip-git-init", false, "Do not init the directory as a git repository")
5959
newCmd.Flags().StringVar(&headerFile, "header-file", "", "Path to file containing headers for generated Go files. Copied to hack/boilerplate.go.txt")
6060
newCmd.Flags().BoolVar(&generatePlaybook, "generate-playbook", false, "Generate a playbook skeleton. (Only used for --type ansible)")
61-
newCmd.Flags().BoolVar(&isClusterScoped, "cluster-scoped", false, "Generate cluster-scoped resources instead of namespace-scoped")
6261

6362
newCmd.Flags().StringVar(&helmChartRef, "helm-chart", "", "Initialize helm operator with existing helm chart (<URL>, <repo>/<name>, or local path)")
6463
newCmd.Flags().StringVar(&helmChartVersion, "helm-chart-version", "", "Specific version of the helm chart (default is latest version)")
@@ -76,7 +75,6 @@ var (
7675
headerFile string
7776
skipGit bool
7877
generatePlaybook bool
79-
isClusterScoped bool
8078

8179
helmChartRef string
8280
helmChartVersion string
@@ -187,9 +185,9 @@ func doGoScaffold() error {
187185
&scaffold.Entrypoint{},
188186
&scaffold.UserSetup{},
189187
&scaffold.ServiceAccount{},
190-
&scaffold.Role{IsClusterScoped: isClusterScoped},
191-
&scaffold.RoleBinding{IsClusterScoped: isClusterScoped},
192-
&scaffold.Operator{IsClusterScoped: isClusterScoped},
188+
&scaffold.Role{},
189+
&scaffold.RoleBinding{},
190+
&scaffold.Operator{},
193191
&scaffold.Apis{},
194192
&scaffold.Controller{},
195193
&scaffold.Version{},
@@ -218,8 +216,8 @@ func doAnsibleScaffold() error {
218216
s := &scaffold.Scaffold{}
219217
err = s.Execute(cfg,
220218
&scaffold.ServiceAccount{},
221-
&scaffold.Role{IsClusterScoped: isClusterScoped},
222-
&scaffold.RoleBinding{IsClusterScoped: isClusterScoped},
219+
&scaffold.Role{},
220+
&scaffold.RoleBinding{},
223221
&scaffold.CRD{Resource: resource},
224222
&scaffold.CR{Resource: resource},
225223
&ansible.BuildDockerfile{GeneratePlaybook: generatePlaybook},
@@ -247,7 +245,7 @@ func doAnsibleScaffold() error {
247245
GeneratePlaybook: generatePlaybook,
248246
Resource: *resource,
249247
},
250-
&ansible.DeployOperator{IsClusterScoped: isClusterScoped},
248+
&ansible.DeployOperator{},
251249
&ansible.Travis{},
252250
&ansible.MoleculeTestLocalMolecule{},
253251
&ansible.MoleculeTestLocalPrepare{Resource: *resource},
@@ -311,7 +309,7 @@ func doHelmScaffold() error {
311309
if err != nil {
312310
return fmt.Errorf("failed to get kubernetes config: %s", err)
313311
}
314-
roleScaffold, err := helm.CreateRoleScaffold(k8sCfg, chart, isClusterScoped)
312+
roleScaffold, err := helm.CreateRoleScaffold(k8sCfg, chart)
315313
if err != nil {
316314
return fmt.Errorf("failed to generate role scaffold: %s", err)
317315
}
@@ -325,8 +323,8 @@ func doHelmScaffold() error {
325323
},
326324
&scaffold.ServiceAccount{},
327325
roleScaffold,
328-
&scaffold.RoleBinding{IsClusterScoped: isClusterScoped},
329-
&helm.Operator{IsClusterScoped: isClusterScoped},
326+
&scaffold.RoleBinding{IsClusterScoped: roleScaffold.IsClusterScoped},
327+
&helm.Operator{},
330328
&scaffold.CRD{Resource: resource},
331329
&scaffold.CR{
332330
Resource: resource,

doc/ansible/user-guide.md

+2-14
Original file line numberDiff line numberDiff line change
@@ -39,20 +39,7 @@ layout][layout_doc] doc.
3939

4040
#### Operator scope
4141

42-
A namespace-scoped operator (the default) watches and manages resources in a single namespace, whereas a cluster-scoped operator watches and manages resources cluster-wide. Namespace-scoped operators are preferred because of their flexibility. They enable decoupled upgrades, namespace isolation for failures and monitoring, and differing API definitions. However, there are use cases where a cluster-scoped operator may make sense. For example, the [cert-manager](https://github.com/jetstack/cert-manager) operator is often deployed with cluster-scoped permissions and watches so that it can manage issuing certificates for an entire cluster.
43-
44-
If you'd like to create your memcached-operator project to be cluster-scoped use the following `operator-sdk new` command instead:
45-
```
46-
$ operator-sdk new memcached-operator --cluster-scoped --api-version=cache.example.com/v1alpha1 --kind=Memcached --type=ansible
47-
```
48-
49-
Using `--cluster-scoped` will scaffold the new operator with the following modifications:
50-
* `deploy/operator.yaml` - Set `WATCH_NAMESPACE=""` instead of setting it to the pod's namespace
51-
* `deploy/role.yaml` - Use `ClusterRole` instead of `Role`
52-
* `deploy/role_binding.yaml`:
53-
* Use `ClusterRoleBinding` instead of `RoleBinding`
54-
* Use `ClusterRole` instead of `Role` for roleRef
55-
* Set the subject namespace to `REPLACE_NAMESPACE`. This must be changed to the namespace in which the operator is deployed.
42+
Read the [operator scope][operator_scope] documentation on how to run your operator as namespace-scoped vs cluster-scoped.
5643

5744
### Watches file
5845

@@ -423,6 +410,7 @@ $ kubectl delete -f deploy/service_account.yaml
423410
$ kubectl delete -f deploy/crds/cache_v1alpha1_memcached_crd.yaml
424411
```
425412

413+
[operator_scope]:./../operator-scope.md
426414
[install_guide]: ../user/install-operator-sdk.md
427415
[layout_doc]:./project_layout.md
428416
[homebrew_tool]:https://brew.sh/

doc/helm/user-guide.md

+2-14
Original file line numberDiff line numberDiff line change
@@ -66,21 +66,8 @@ If `--helm-chart-version` is not set, the SDK will fetch the latest available ve
6666

6767
### Operator scope
6868

69-
A namespace-scoped operator (the default) watches and manages resources in a single namespace, whereas a cluster-scoped operator watches and manages resources cluster-wide. Namespace-scoped operators are preferred because of their flexibility. They enable decoupled upgrades, namespace isolation for failures and monitoring, and differing API definitions. However, there are use cases where a cluster-scoped operator may make sense. For example, the [cert-manager](https://github.com/jetstack/cert-manager) operator is often deployed with cluster-scoped permissions and watches so that it can manage issuing certificates for an entire cluster.
69+
Read the [operator scope][operator_scope] documentation on how to run your operator as namespace-scoped vs cluster-scoped.
7070

71-
If you'd like to create your nginx-operator project to be cluster-scoped use the following `operator-sdk new` command instead:
72-
73-
```sh
74-
operator-sdk new nginx-operator --cluster-scoped --api-version=example.com/v1alpha1 --kind=Nginx --type=helm
75-
```
76-
77-
Using `--cluster-scoped` will scaffold the new operator with the following modifications:
78-
* `deploy/operator.yaml` - Set `WATCH_NAMESPACE=""` instead of setting it to the pod's namespace
79-
* `deploy/role.yaml` - Use `ClusterRole` instead of `Role`
80-
* `deploy/role_binding.yaml`:
81-
* Use `ClusterRoleBinding` instead of `RoleBinding`
82-
* Use `ClusterRole` instead of `Role` for roleRef
83-
* Set the subject namespace to `REPLACE_NAMESPACE`. This must be changed to the namespace in which the operator is deployed.
8471

8572
## Customize the operator logic
8673

@@ -342,6 +329,7 @@ kubectl delete -f deploy/service_account.yaml
342329
kubectl delete -f deploy/crds/example_v1alpha1_nginx_crd.yaml
343330
```
344331

332+
[operator_scope]:./../operator-scope.md
345333
[install_guide]: ../user/install-operator-sdk.md
346334
[layout_doc]:./project_layout.md
347335
[homebrew_tool]:https://brew.sh/

doc/operator-scope.md

+25
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
## Operator scope
2+
3+
A namespace-scoped operator watches and manages resources in a single namespace, whereas a cluster-scoped operator watches and manages resources cluster-wide. Namespace-scoped operators are preferred because of their flexibility. They enable decoupled upgrades, namespace isolation for failures and monitoring, and differing API definitions.
4+
5+
However, there are use cases where a cluster-scoped operator may make sense. For example, the [cert-manager](https://github.com/jetstack/cert-manager) operator is often deployed with cluster-scoped permissions and watches so that it can manage issuing certificates for an entire cluster.
6+
7+
The SDK scaffolds operators to be namespaced by default, but with a few modifications to the default manifests, the operator can be run as cluster-scoped.
8+
9+
* `deploy/operator.yaml`:
10+
* Set `WATCH_NAMESPACE=""` to watch all namespaces instead of setting it to the pod's namespace
11+
* `deploy/role.yaml`:
12+
* Use `ClusterRole` instead of `Role`
13+
* `deploy/role_binding.yaml`:
14+
* Use `ClusterRoleBinding` instead of `RoleBinding`
15+
* Use `ClusterRole` instead of `Role` for `roleRef`
16+
* Set the subject namespace to the namespace in which the operator is deployed.
17+
18+
### CRD scope
19+
20+
Additionally, the CustomResourceDefinition (CRD) scope can be changed for cluster-scoped operators so that there is only a single instance (for a given name) of the CRD to manage across the cluster.
21+
22+
For each CRD that needs to be cluster-scoped, update its manifest to be cluster-scoped.
23+
24+
* `deploy/crds/<group>_<version>_<kind>_crd.yaml`
25+
* Set `spec.scope: Cluster`

doc/sdk-cli-reference.md

-1
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,6 @@ Scaffolds a new operator project.
259259
* `--api-version` string - CRD APIVersion in the format `$GROUP_NAME/$VERSION` (e.g app.example.com/v1alpha1)
260260
* `--kind` string - CRD Kind. (e.g AppService)
261261
* `--generate-playbook` - Generate a playbook skeleton. (Only used for `--type ansible`)
262-
* `--cluster-scoped` - Initialize the operator to be cluster-scoped instead of namespace-scoped
263262
* `--helm-chart` string - Initialize helm operator with existing helm chart (`<URL>`, `<repo>/<name>`, or local path)
264263
* `--helm-chart-repo` string - Chart repository URL for the requested helm chart
265264
* `--helm-chart-version` string - Specific version of the helm chart (default is latest version)

doc/user-guide.md

+2-15
Original file line numberDiff line numberDiff line change
@@ -57,20 +57,7 @@ The Operator SDK uses [vendoring][go_vendoring] to supply dependencies to operat
5757

5858
#### Operator scope
5959

60-
A namespace-scoped operator (the default) watches and manages resources in a single namespace, whereas a cluster-scoped operator watches and manages resources cluster-wide. Namespace-scoped operators are preferred because of their flexibility. They enable decoupled upgrades, namespace isolation for failures and monitoring, and differing API definitions. However, there are use cases where a cluster-scoped operator may make sense. For example, the [cert-manager](https://github.com/jetstack/cert-manager) operator is often deployed with cluster-scoped permissions and watches so that it can manage issuing certificates for an entire cluster.
61-
62-
If you'd like to create your memcached-operator project to be cluster-scoped use the following `operator-sdk new` command instead:
63-
```sh
64-
$ operator-sdk new memcached-operator --cluster-scoped
65-
```
66-
67-
Using `--cluster-scoped` will scaffold the new operator with the following modifications:
68-
* `deploy/operator.yaml` - Set `WATCH_NAMESPACE=""` instead of setting it to the pod's namespace
69-
* `deploy/role.yaml` - Use `ClusterRole` instead of `Role`
70-
* `deploy/role_binding.yaml`:
71-
* Use `ClusterRoleBinding` instead of `RoleBinding`
72-
* Use `ClusterRole` instead of `Role` for roleRef
73-
* Set the subject namespace to `REPLACE_NAMESPACE`. This must be changed to the namespace in which the operator is deployed.
60+
Read the [operator scope][operator_scope] documentation on how to run your operator as namespace-scoped vs cluster-scoped.
7461

7562
### Manager
7663
The main program for the operator `cmd/manager/main.go` initializes and runs the [Manager][manager_go_doc].
@@ -557,7 +544,7 @@ func main() {
557544

558545
When the operator is not running in a cluster, the Manager will return an error on starting since it can't detect the operator's namespace in order to create the configmap for leader election. You can override this namespace by setting the Manager's `LeaderElectionNamespace` option.
559546

560-
547+
[operator_scope]:./operator-scope.md
561548
[install_guide]: ./user/install-operator-sdk.md
562549
[pod_eviction_timeout]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/#options
563550
[manager_options]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Options

internal/pkg/scaffold/ansible/deploy_operator.go

-5
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@ const DeployOperatorFile = "operator.yaml"
2525

2626
type DeployOperator struct {
2727
input.Input
28-
IsClusterScoped bool
2928
}
3029

3130
// GetInput - gets the input
@@ -76,13 +75,9 @@ spec:
7675
name: runner
7776
env:
7877
- name: WATCH_NAMESPACE
79-
[[- if .IsClusterScoped ]]
80-
value: ""
81-
[[- else ]]
8278
valueFrom:
8379
fieldRef:
8480
fieldPath: metadata.namespace
85-
[[- end]]
8681
- name: POD_NAME
8782
valueFrom:
8883
fieldRef:

internal/pkg/scaffold/helm/operator.go

-6
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,6 @@ import (
2424
// Operator specifies the Helm operator.yaml manifest scaffold
2525
type Operator struct {
2626
input.Input
27-
28-
IsClusterScoped bool
2927
}
3028

3129
// GetInput gets the scaffold execution input
@@ -59,13 +57,9 @@ spec:
5957
imagePullPolicy: Always
6058
env:
6159
- name: WATCH_NAMESPACE
62-
{{- if .IsClusterScoped }}
63-
value: ""
64-
{{- else }}
6560
valueFrom:
6661
fieldRef:
6762
fieldPath: metadata.namespace
68-
{{- end}}
6963
- name: POD_NAME
7064
valueFrom:
7165
fieldRef:

internal/pkg/scaffold/helm/role.go

+7-19
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
package helm
1616

1717
import (
18-
"errors"
1918
"fmt"
2019
"path/filepath"
2120
"sort"
@@ -41,11 +40,12 @@ import (
4140
// CreateRoleScaffold generates a role scaffold from the provided helm chart. It
4241
// renders a release manifest using the chart's default values and uses the Kubernetes
4342
// discovery API to lookup each resource in the resulting manifest.
44-
func CreateRoleScaffold(cfg *rest.Config, chart *chart.Chart, isClusterScoped bool) (*scaffold.Role, error) {
43+
// The role scaffold will have IsClusterScoped=true if the chart lists cluster-scoped resources.
44+
func CreateRoleScaffold(cfg *rest.Config, chart *chart.Chart) (*scaffold.Role, error) {
4545
log.Info("Generating RBAC rules")
4646

4747
roleScaffold := &scaffold.Role{
48-
IsClusterScoped: isClusterScoped,
48+
IsClusterScoped: false,
4949
SkipDefaultRules: true,
5050
// TODO: enable metrics in helm operator
5151
SkipMetricsRules: true,
@@ -72,22 +72,10 @@ func CreateRoleScaffold(cfg *rest.Config, chart *chart.Chart, isClusterScoped bo
7272
roleScaffold.SkipDefaultRules = false
7373
}
7474

75-
if !isClusterScoped {
76-
// If there are cluster-scoped resources, but we're creating a namespace-scoped operator,
77-
// log all of the cluster-scoped resources, and return a helpful error message.
78-
for _, rule := range clusterResourceRules {
79-
for _, resource := range rule.Resources {
80-
log.Errorf("Resource %s.%s is cluster-scoped, but --cluster-scoped was not set.", resource, rule.APIGroups[0])
81-
}
82-
}
83-
if len(clusterResourceRules) > 0 {
84-
return nil, errors.New("must use --cluster-scoped with chart containing cluster-scoped resources")
85-
}
86-
87-
// If we're here, there are no cluster-scoped resources, so add just the rules for namespaced resources
88-
roleScaffold.CustomRules = append(roleScaffold.CustomRules, namespacedResourceRules...)
89-
} else {
90-
// For a cluster-scoped operator, add all of the rules
75+
// Use a ClusterRole if cluster scoped resources are listed in the chart
76+
if len(clusterResourceRules) > 0 {
77+
log.Info("Scaffolding ClusterRole and ClusterRoleBinding for cluster-scoped resources in the helm chart")
78+
roleScaffold.IsClusterScoped = true
9179
roleScaffold.CustomRules = append(roleScaffold.CustomRules, append(clusterResourceRules, namespacedResourceRules...)...)
9280
}
9381

internal/pkg/scaffold/operator.go

-6
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,6 @@ const OperatorYamlFile = "operator.yaml"
2424

2525
type Operator struct {
2626
input.Input
27-
28-
IsClusterScoped bool
2927
}
3028

3129
func (s *Operator) GetInput() (input.Input, error) {
@@ -60,13 +58,9 @@ spec:
6058
imagePullPolicy: Always
6159
env:
6260
- name: WATCH_NAMESPACE
63-
{{- if .IsClusterScoped }}
64-
value: ""
65-
{{- else }}
6661
valueFrom:
6762
fieldRef:
6863
fieldPath: metadata.namespace
69-
{{- end}}
7064
- name: POD_NAME
7165
valueFrom:
7266
fieldRef:

internal/pkg/scaffold/operator_test.go

-46
Original file line numberDiff line numberDiff line change
@@ -33,19 +33,6 @@ func TestOperator(t *testing.T) {
3333
}
3434
}
3535

36-
func TestOperatorClusterScoped(t *testing.T) {
37-
s, buf := setupScaffoldAndWriter()
38-
err := s.Execute(appConfig, &Operator{IsClusterScoped: true})
39-
if err != nil {
40-
t.Fatalf("Failed to execute the scaffold: (%v)", err)
41-
}
42-
43-
if operatorClusterScopedExp != buf.String() {
44-
diffs := diffutil.Diff(operatorClusterScopedExp, buf.String())
45-
t.Fatalf("Expected vs actual differs.\n%v", diffs)
46-
}
47-
}
48-
4936
const operatorExp = `apiVersion: apps/v1
5037
kind: Deployment
5138
metadata:
@@ -80,36 +67,3 @@ spec:
8067
- name: OPERATOR_NAME
8168
value: "app-operator"
8269
`
83-
84-
const operatorClusterScopedExp = `apiVersion: apps/v1
85-
kind: Deployment
86-
metadata:
87-
name: app-operator
88-
spec:
89-
replicas: 1
90-
selector:
91-
matchLabels:
92-
name: app-operator
93-
template:
94-
metadata:
95-
labels:
96-
name: app-operator
97-
spec:
98-
serviceAccountName: app-operator
99-
containers:
100-
- name: app-operator
101-
# Replace this with the built image name
102-
image: REPLACE_IMAGE
103-
command:
104-
- app-operator
105-
imagePullPolicy: Always
106-
env:
107-
- name: WATCH_NAMESPACE
108-
value: ""
109-
- name: POD_NAME
110-
valueFrom:
111-
fieldRef:
112-
fieldPath: metadata.name
113-
- name: OPERATOR_NAME
114-
value: "app-operator"
115-
`

0 commit comments

Comments
 (0)