Notes: creating a Kubernetes user with an x509 client certificate (CSR approval) and RBAC role bindings
1
---make sure you have openssl installed
2
openssl genrsa -out bwagoner.pem 2048
3
----The CN in the next line is how you will be referenced in kubernetes rolebindings.
4
openssl req -new -key bwagoner.pem -out bwagoner.csr -subj "/CN=bwagoner"
5
----If you want the user to be part of some groups, the previous line would look something like-----
6
openssl req -new -key bwagoner.pem -out bwagoner.csr -subj "/CN=bwagoner/O=app1/O=app2"
7
8
cat bwagoner.csr | base64 | tr -d '\n'
9
10
vi bwagoner.req
11
12
apiVersion: certificates.k8s.io/v1beta1  # NOTE(review): deprecated — on Kubernetes 1.19+ use certificates.k8s.io/v1, which also requires a signerName field (e.g. kubernetes.io/kube-apiserver-client)
13
kind: CertificateSigningRequest
14
metadata:
15
  name: user-request-bwagoner
16
spec:
17
  groups:
18
  - system:authenticated
19
  request: paste the base64 CSR here, from the output of the "cat bwagoner.csr | base64" line above
20
  usages:
21
  - digital signature
22
  - key encipherment
23
  - client auth
24
25
kubectl create -f bwagoner.req
26
kubectl get csr
27
kubectl certificate approve user-request-bwagoner
28
kubectl get csr user-request-bwagoner -o jsonpath='{.status.certificate}' | base64 -d > bwagoner.crt
29
30
copy an existing kubeconfig and replace the "client-certificate-data:" and "client-key-data:" values with these, respectively (or create an all-new kubeconfig):
31
cat bwagoner.crt |base64 |tr -d '\n'
32
cat bwagoner.pem |base64 |tr -d '\n'
33
34
Alternately, create a new kubeconfig. Also change the username and name fields accordingly in that new kubeconfig file; those fields only really affect the local kubeconfig file and are not the usernames used by Kubernetes. Give the new kubeconfig to the client.
35
kubectl --kubeconfig ~/.kube/config-jakub config set-cluster jakub --insecure-skip-tls-verify=true --server=https://api.my-cluster.dev.dbgcloud.io
36
kubectl --kubeconfig ~/.kube/config-jakub config set-credentials jakub --client-certificate=jakub.crt --client-key=jakub.pem --embed-certs=true
37
kubectl --kubeconfig ~/.kube/config-jakub config set-context jakub --cluster=jakub --user=jakub
38
kubectl --kubeconfig ~/.kube/config-jakub config use-context jakub
39
kubectl create rolebinding bwagoner --clusterrole=admin --user=bwagoner --dry-run -o yaml
40
kubectl create rolebinding bwagoner --clusterrole=admin --user=bwagoner
41
42
-----optional: limit user to one namespace--------
43
kubectl create namespace foo
44
45
vi test.yml
46
47
48
kind: Role
49
apiVersion: rbac.authorization.k8s.io/v1beta1
51
metadata:
52
  name: limited-to-foo-namespace
53
  namespace: foo
54
rules:
55
- apiGroups: ["", "extensions", "apps"]
56
  resources: ["*"]
57
  verbs: ["*"]
58
- apiGroups: ["batch"]
59
  resources:
60
  - jobs
61
  - cronjobs
62
  verbs: ["*"]
63
64
---
65
kind: RoleBinding
66
apiVersion: rbac.authorization.k8s.io/v1beta1
67
metadata:
68
  name: limited-to-foo-binding
69
  namespace: foo
70
subjects:
71
- kind: User
72
  name: bwagoner
73
  apiGroup: rbac.authorization.k8s.io
74
roleRef:
75
  apiGroup: rbac.authorization.k8s.io
76
  kind: Role
77
  name: limited-to-foo-namespace
78
  
79
kubectl create -f test.yml
80
81
--------if you don't want to have to specify the locked down namespace in every command on the client side then update your kubeconfig file----
82
kubectl config set-context $(kubectl config current-context) --namespace=foo