This repository was archived by the owner on Mar 24, 2023. It is now read-only.

Commit 9f9fe11

Fix empty value for env in helm values (#790)
1 parent 5559036 commit 9f9fe11
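The new kibana integration fixture added below exercises the case named in the commit title: the chart's default values declare an empty env map, and the rendered Deployment carries an empty env list on its container. The pre-existing init fixtures also drop their helmValues entry where it merely duplicated helmValuesDefaults, and the update fixtures now expect user overrides to be merged into the chart's commented defaults rather than into a comment-stripped re-serialization. A condensed excerpt from the kibana fixtures (surrounding keys omitted; the full strings appear in the diffs below):

    # from the kibana chart's default values (helmValuesDefaults)
    env: {}
     # ELASTICSEARCH_URL: http://elasticsearch-client:9200
     # SERVER_PORT: 5601

    # from the rendered kibana Deployment
    containers:
    - env: []
      image: docker.elastic.co/kibana/kibana-oss:6.5.4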

File tree: 24 files changed (+180 -33 lines)

integration/init/factorio/expected/.ship/state.json (-1 line)
@@ -1,7 +1,6 @@
 {
   "v1": {
     "config": {},
-    "helmValues": "# Factorio image version\n# ref: https://quay.io/repository/games_on_k8s/factorio?tab=tags\nimage: quay.io/games_on_k8s/factorio\nimageTag: \"0.14.22\"\n\n# Configure resource requests and limits\n# ref: http://kubernetes.io/docs/user-guide/compute-resources/\nresources:\n requests:\n memory: 512Mi\n cpu: 500m\n\n# Most of these map to environment variables. See docker-factorio for details:\n# https://github.com/games-on-k8s/docker-factorio/blob/master/README.md#environment-variable-reference\nfactorioServer:\n name: Kubernetes Server\n description: Factorio running on Kubernetes\n port: 34197\n # Lock this server down with a password.\n # password: change.me\n maxPlayers: 255\n # Publishes this server in the server browser if true.\n # You'll want to set Factorio.User below if true, as it becomes required.\n isPublic: false\n verifyIdentity: false\n # Allows or disallows console commands. Must be one of: `true`, `false`, or `admins-only`.\n allowCommands: admins-only\n # Pause the server when nobody is connected?\n noAutoPause: \"false\"\n # You'll want to change this to NodePort if you are on AWS.\n serviceType: LoadBalancer\n\n autosave:\n # Auto-save interval in minutes.\n interval: 2\n slots: 3\n\n rcon:\n enabled: false\n port: 27015\n # Empty value here enables an auto-generated password.\n password: \"\"\n serviceType: LoadBalancer\n\nfactorio:\n # Your factorio.com User/pass is needed if factorioServer.IsPublic is true.\n user:\n username: your.username\n password: your.password\n\npersistence:\n ## factorio data Persistent Volume Storage Class\n ## If defined, storageClassName: \u003cstorageClass\u003e\n ## If set to \"-\", storageClassName: \"\", which disables dynamic provisioning\n ## If undefined (the default) or set to null, no storageClassName spec is\n ## set, choosing the default provisioner. (gp2 on AWS, standard on\n ## GKE, AWS \u0026 OpenStack)\n ##\n # storageClass: \"-\"\n savedGames:\n # Set this to false if you don't care to persist saved games between restarts.\n enabled: true\n size: 1Gi\n mods:\n enabled: false\n size: 128Mi\n",
     "releaseName": "factorio",
     "helmValuesDefaults": "# Factorio image version\n# ref: https://quay.io/repository/games_on_k8s/factorio?tab=tags\nimage: quay.io/games_on_k8s/factorio\nimageTag: \"0.14.22\"\n\n# Configure resource requests and limits\n# ref: http://kubernetes.io/docs/user-guide/compute-resources/\nresources:\n requests:\n memory: 512Mi\n cpu: 500m\n\n# Most of these map to environment variables. See docker-factorio for details:\n# https://github.com/games-on-k8s/docker-factorio/blob/master/README.md#environment-variable-reference\nfactorioServer:\n name: Kubernetes Server\n description: Factorio running on Kubernetes\n port: 34197\n # Lock this server down with a password.\n # password: change.me\n maxPlayers: 255\n # Publishes this server in the server browser if true.\n # You'll want to set Factorio.User below if true, as it becomes required.\n isPublic: false\n verifyIdentity: false\n # Allows or disallows console commands. Must be one of: `true`, `false`, or `admins-only`.\n allowCommands: admins-only\n # Pause the server when nobody is connected?\n noAutoPause: \"false\"\n # You'll want to change this to NodePort if you are on AWS.\n serviceType: LoadBalancer\n\n autosave:\n # Auto-save interval in minutes.\n interval: 2\n slots: 3\n\n rcon:\n enabled: false\n port: 27015\n # Empty value here enables an auto-generated password.\n password: \"\"\n serviceType: LoadBalancer\n\nfactorio:\n # Your factorio.com User/pass is needed if factorioServer.IsPublic is true.\n user:\n username: your.username\n password: your.password\n\npersistence:\n ## factorio data Persistent Volume Storage Class\n ## If defined, storageClassName: \u003cstorageClass\u003e\n ## If set to \"-\", storageClassName: \"\", which disables dynamic provisioning\n ## If undefined (the default) or set to null, no storageClassName spec is\n ## set, choosing the default provisioner. (gp2 on AWS, standard on\n ## GKE, AWS \u0026 OpenStack)\n ##\n # storageClass: \"-\"\n savedGames:\n # Set this to false if you don't care to persist saved games between restarts.\n enabled: true\n size: 1Gi\n mods:\n enabled: false\n size: 128Mi\n",
     "upstream": "https://github.com/helm/charts/tree/ffb84f85a861e765caade879491a75a6dd3091a5/stable/factorio",

integration/init/git-root-directory/expected/.ship/state.json (-1 line)
@@ -1,7 +1,6 @@
 {
   "v1": {
     "config": {},
-    "helmValues": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n\n",
     "releaseName": "values-update",
     "helmValuesDefaults": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n\n",
     "upstream": "https://github.com/replicatedhq/test-chart-root-dir/tree/507feecae588c958ebe82bcf701b8be63f34ac9b/",

The following state.json fixtures also changed; their diffs are large and are not rendered here:

integration/init/grafana-with-values/expected/.ship/state.json (+1 -1)
integration/init/istio-1.0.3/expected/.ship/state.json (-1)
integration/init/istio-gogetter/expected/.ship/state.json (-1)
integration/init/istio/expected/.ship/state.json (-1)
integration/init/jaeger-helm/expected/.ship/state.json (-1)
The hunks that follow add a new kibana integration fixture: its expected .ship/state.json, the base manifests and kustomization, the overlay kustomization, and the rendered output.
@@ -0,0 +1,16 @@
+{
+  "v1": {
+    "config": {},
+    "releaseName": "kibana",
+    "helmValuesDefaults": "image:\n repository: \"docker.elastic.co/kibana/kibana-oss\"\n tag: \"6.5.4\"\n pullPolicy: \"IfNotPresent\"\n\ncommandline:\n args: []\n\nenv: {}\n # All Kibana configuration options are adjustable via env vars.\n # To adjust a config option to an env var uppercase + replace `.` with `_`\n # Ref: https://www.elastic.co/guide/en/kibana/current/settings.html\n #\n # ELASTICSEARCH_URL: http://elasticsearch-client:9200\n # SERVER_PORT: 5601\n # LOGGING_VERBOSE: \"true\"\n # SERVER_DEFAULTROUTE: \"/app/kibana\"\n\nfiles:\n kibana.yml:\n ## Default Kibana configuration from kibana-docker.\n server.name: kibana\n server.host: \"0\"\n elasticsearch.url: http://elasticsearch:9200\n\n ## Custom config properties below\n ## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html\n # server.port: 5601\n # logging.verbose: \"true\"\n # server.defaultRoute: \"/app/kibana\"\n\ndeployment:\n annotations: {}\n\nservice:\n type: ClusterIP\n externalPort: 443\n internalPort: 5601\n # authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer\n ## External IP addresses of service\n ## Default: nil\n ##\n # externalIPs:\n # - 192.168.0.1\n #\n ## LoadBalancer IP if service.type is LoadBalancer\n ## Default: nil\n ##\n # loadBalancerIP: 10.2.2.2\n annotations: {}\n # Annotation example: setup ssl with aws cert when service.type is LoadBalancer\n # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT\n labels: {}\n ## Label example: show service URL in `kubectl cluster-info`\n # kubernetes.io/cluster-service: \"true\"\n ## Limit load balancer source ips to list of CIDRs (where available)\n # loadBalancerSourceRanges: []\n\ningress:\n enabled: false\n # hosts:\n # - kibana.localhost.localdomain\n # - localhost.localdomain/kibana\n # annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n # tls:\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nserviceAccount:\n # Specifies whether a service account should be created\n create: false\n # The name of the service account to use.\n # If not set and create is true, a name is generated using the fullname template\n # If set and create is false, the service account must be existing\n name:\n\nlivenessProbe:\n enabled: false\n initialDelaySeconds: 30\n timeoutSeconds: 10\n\nreadinessProbe:\n enabled: false\n initialDelaySeconds: 30\n timeoutSeconds: 10\n periodSeconds: 10\n successThreshold: 5\n\n# Enable an authproxy. Specify container in extraContainers\nauthProxyEnabled: false\n\nextraContainers: |\n# - name: proxy\n# image: quay.io/gambol99/keycloak-proxy:latest\n# args:\n# - --resource=uri=/*\n# - --discovery-url=https://discovery-url\n# - --client-id=client\n# - --client-secret=secret\n# - --listen=0.0.0.0:5602\n# - --upstream-url=http://127.0.0.1:5601\n# ports:\n# - name: web\n# containerPort: 9090\nresources: {}\n # limits:\n # cpu: 100m\n # memory: 300Mi\n # requests:\n # cpu: 100m\n # memory: 300Mi\n\npriorityClassName: \"\"\n\n# Affinity for pod assignment\n# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n# affinity: {}\n\n# Tolerations for pod assignment\n# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\ntolerations: []\n\n# Node labels for pod assignment\n# Ref: https://kubernetes.io/docs/user-guide/node-selection/\nnodeSelector: {}\n\npodAnnotations: {}\nreplicaCount: 1\nrevisionHistoryLimit: 3\n\n# To export a dashboard from a running Kibana 6.3.x use:\n# curl --user \u003cusername\u003e:\u003cpassword\u003e -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=\u003csome-dashboard-uuid\u003e \u003e my-dashboard.json\n# A dashboard is defined by a name and a string with the json payload or the download url\ndashboardImport:\n timeout: 60\n xpackauth:\n enabled: false\n username: myuser\n password: mypass\n dashboards: {}\n # k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json\n\n# List of plugins to install using initContainer\n# NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well.\nplugins:\n # set to true to enable plugins installation\n enabled: false\n # set to true to remove all kibana plugins before installation\n reset: false\n # Use \u003cplugin_name,version,url\u003e to add/upgrade plugin\n values:\n # - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip\n # - logtrail,0.1.30,https://github.com/sivasamyk/logtrail/releases/download/v0.1.30/logtrail-6.4.2-0.1.30.zip\n # - other_plugin\n\npersistentVolumeClaim:\n # set to true to use pvc\n enabled: false\n # set to true to use you own pvc\n existingClaim: false\n annotations: {}\n\n accessModes:\n - ReadWriteOnce\n size: \"5Gi\"\n ## If defined, storageClassName: \u003cstorageClass\u003e\n ## If set to \"-\", storageClassName: \"\", which disables dynamic provisioning\n ## If undefined (the default) or set to null, no storageClassName spec is\n ## set, choosing the default provisioner. (gp2 on AWS, standard on\n ## GKE, AWS \u0026 OpenStack)\n ##\n # storageClass: \"-\"\n\n# default security context\nsecurityContext:\n enabled: false\n allowPrivilegeEscalation: false\n runAsUser: 1000\n fsGroup: 2000\n\nextraConfigMapMounts: []\n # - name: logtrail-configs\n # configMap: kibana-logtrail\n # mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json\n # subPath: logtrail.json\n",
+    "upstream": "github.com/replicatedhq/test-charts/tree/316b56dd3c1209a470dccaa8016c4cad76de0299/kibana",
+    "metadata": {
+      "applicationType": "helm",
+      "icon": "https://raw.githubusercontent.com/elastic/kibana/master/src/ui/public/icons/kibana-color.svg",
+      "name": "kibana",
+      "releaseNotes": "Add kibana (#27)",
+      "version": "1.1.2"
+    },
+    "contentSHA": "f54c36389890161712f38e5c0a7b46586193d3ee6e1cb773923e6c620066e840"
+  }
+}
@@ -0,0 +1,12 @@
+apiVersion: v1
+data:
+  kibana.yml: |
+    elasticsearch.url: http://elasticsearch:9200
+    server.host: "0"
+    server.name: kibana
+kind: ConfigMap
+metadata:
+  labels:
+    app: kibana
+    release: kibana
+  name: kibana
@@ -0,0 +1,38 @@
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  labels:
+    app: kibana
+    release: kibana
+  name: kibana
+spec:
+  replicas: 1
+  revisionHistoryLimit: 3
+  template:
+    metadata:
+      annotations:
+        checksum/config: 0d42b0d5d1f2c84f74f7e20e3b643f2dabf4f96963c1b4ced060624e7211f4d4
+      labels:
+        app: kibana
+        release: kibana
+    spec:
+      containers:
+      - env: []
+        image: docker.elastic.co/kibana/kibana-oss:6.5.4
+        imagePullPolicy: IfNotPresent
+        name: kibana
+        ports:
+        - containerPort: 5601
+          name: kibana
+          protocol: TCP
+        resources: {}
+        volumeMounts:
+        - mountPath: /usr/share/kibana/config/kibana.yml
+          name: kibana
+          subPath: kibana.yml
+      serviceAccountName: default
+      tolerations: []
+      volumes:
+      - configMap:
+          name: kibana
+        name: kibana
@@ -0,0 +1,6 @@
+kind: ""
+apiversion: ""
+resources:
+- configmap.yaml
+- deployment.yaml
+- service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: kibana
+    release: kibana
+  name: kibana
+spec:
+  ports:
+  - port: 443
+    protocol: TCP
+    targetPort: 5601
+  selector:
+    app: kibana
+    release: kibana
+  type: ClusterIP
@@ -0,0 +1,4 @@
+kind: ""
+apiversion: ""
+bases:
+- ../../base
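The bases entry above points the overlay back at the base manifests, and the hunk that follows contains the merged output of building that overlay (ConfigMap, Service, and Deployment in a single stream). The overlay is also where user-supplied patches would be listed; a hypothetical entry, shown only to illustrate the kustomize mechanism (deployment.patch.yaml is not part of this commit):

    kind: ""
    apiversion: ""
    bases:
    - ../../base
    patchesStrategicMerge:
    - deployment.patch.yaml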
@@ -0,0 +1,68 @@
+apiVersion: v1
+data:
+  kibana.yml: |
+    elasticsearch.url: http://elasticsearch:9200
+    server.host: "0"
+    server.name: kibana
+kind: ConfigMap
+metadata:
+  labels:
+    app: kibana
+    release: kibana
+  name: kibana
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: kibana
+    release: kibana
+  name: kibana
+spec:
+  ports:
+  - port: 443
+    protocol: TCP
+    targetPort: 5601
+  selector:
+    app: kibana
+    release: kibana
+  type: ClusterIP
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  labels:
+    app: kibana
+    release: kibana
+  name: kibana
+spec:
+  replicas: 1
+  revisionHistoryLimit: 3
+  template:
+    metadata:
+      annotations:
+        checksum/config: 0d42b0d5d1f2c84f74f7e20e3b643f2dabf4f96963c1b4ced060624e7211f4d4
+      labels:
+        app: kibana
+        release: kibana
+    spec:
+      containers:
+      - env: []
+        image: docker.elastic.co/kibana/kibana-oss:6.5.4
+        imagePullPolicy: IfNotPresent
+        name: kibana
+        ports:
+        - containerPort: 5601
+          name: kibana
+          protocol: TCP
+        resources: {}
+        volumeMounts:
+        - mountPath: /usr/share/kibana/config/kibana.yml
+          name: kibana
+          subPath: kibana.yml
+      serviceAccountName: default
+      tolerations: []
+      volumes:
+      - configMap:
+          name: kibana
+        name: kibana

integration/init/kibana/metadata.yaml (+3 lines)
@@ -0,0 +1,3 @@
+upstream: "github.com/replicatedhq/test-charts/tree/316b56dd3c1209a470dccaa8016c4cad76de0299/kibana"
+args: ["--prefer-git"]
+skip_cleanup: false

integration/update/basic/expected/.ship/state.json (+1 -1)
@@ -1,7 +1,7 @@
 {
   "v1": {
     "config": {},
-    "helmValues": "replicaCount: 5\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\nservice:\n type: ClusterIP\n port: 80\ningress:\n enabled: false\n annotations: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\n",
+    "helmValues": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 5\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n",
     "releaseName": "basic",
     "helmValuesDefaults": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 1\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n",
     "kustomize": {

integration/update/excluded-basic/expected/.ship/state.json (+1 -1)
@@ -1,7 +1,7 @@
 {
   "v1": {
     "config": {},
-    "helmValues": "replicaCount: 5\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\nservice:\n type: ClusterIP\n port: 80\ningress:\n enabled: false\n annotations: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\n",
+    "helmValues": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 5\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n",
     "releaseName": "basic",
     "helmValuesDefaults": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 1\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n",
     "kustomize": {

0 commit comments
