values.yml
# Values from bcgov/quickstart-openshift
global:
  config:
    dbName: app #test
crunchy: # enable for TEST and PROD; PR-based pipelines simply use a single postgres
  enabled: true
  postgresVersion: 18
  postGISVersion: '3.6'
  image: 'artifacts.developer.gov.bc.ca/bcgov-docker-local/crunchy-postgres-gis:ubi9-18.1-3.6-2547'
  openshift: true
  imagePullPolicy: IfNotPresent
  # Enable the block below to start a new crunchy cluster from a backed-up location after a
  # disaster; crunchy will choose the best backup to recover from. See the commented example
  # after this block, and follow
  # https://access.crunchydata.com/documentation/postgres-operator/5.2.0/tutorial/disaster-recovery/
  # ("Clone From Backups Stored in S3 / GCS / Azure Blob Storage").
  clone:
    enabled: false
    s3:
      enabled: false
    pvc:
      enabled: false
    path: ~ # path to source the cluster from, e.g. /backups/cluster/version/1; ideally the new cluster's version minus 1
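  # A minimal sketch of a filled-in clone block, recovering a new cluster from PVC backups of
  # the previous cluster version; the path value is a hypothetical example, not a real location:
  # clone:
  #   enabled: true
  #   pvc:
  #     enabled: true
  #   path: /backups/cluster/version/1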
  # Enable the block below to roll the current cluster back to a specific timestamp in its
  # history. See the commented example after this block, and follow
  # https://access.crunchydata.com/documentation/postgres-operator/5.2.0/tutorial/disaster-recovery/
  # ("Perform an In-Place Point-in-time-Recovery (PITR)").
  restore:
    repoName: ~ # provide the repo name
    enabled: false
    target: ~ # target timestamp to roll back to, e.g. 2024-03-24 17:16:00-07
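  # A minimal sketch of an in-place PITR; repoName and target are hypothetical placeholders
  # (repo1 is pgBackRest's conventional first repo name, but confirm against your cluster):
  # restore:
  #   repoName: repo1
  #   enabled: true
  #   target: 2024-03-24 17:16:00-07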
  instances:
    name: db # high availability
    replicas: 1 # 1 for DEV; 2 or 3 for high availability in TEST and PROD
    metadata:
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9187'
    dataVolumeClaimSpec:
      storage: 5Gi
      storageClassName: netapp-block-standard
      walStorage: 2Gi
    requests:
      cpu: 250m
      memory: 4Gi
    replicaCertCopy:
      requests:
        cpu: 10m
        memory: 128Mi
  pgBackRest:
    enabled: true
    backupPath: /backups/test/cluster/version # change for PROD (create a values-prod.yaml); only used in the s3 backups context
    clusterCounter: 1 # identifies the current counter for the cluster; increment it each time the cluster is cloned
    # If retention-full-type is set to 'count', the oldest backups expire once the number of backups reaches the retention value.
    # If retention-full-type is set to 'time', the retention value is the number of days of full backups to keep before expiration.
    retentionFullType: count
    s3:
      enabled: false # if enabled, the values below must be provided; see the commented schedule example after this block
      retention: 7 # one week of backups in object storage
      bucket: ~
      endpoint: ~
      accessKey: ~
      secretKey: ~
      fullBackupSchedule: ~ # must be provided if s3 is enabled
      incrementalBackupSchedule: ~ # must be provided if s3 is enabled
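    # A sketch of what the s3 schedules might look like once enabled, mirroring the pvc
    # schedules below (cron syntax: one full backup at 08:00, hourly incrementals otherwise):
    # fullBackupSchedule: 0 8 * * *
    # incrementalBackupSchedule: 0 0-7,9-23 * * *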
    pvc:
      retention: 1 # one day of hot, active backups on the PVC
      retentionFullType: count
      fullBackupSchedule: 0 8 * * *
      incrementalBackupSchedule: 0 0-7,9-23 * * * # hourly incrementals, skipping the 08:00 full-backup slot
      volume:
        accessModes: 'ReadWriteOnce'
        storage: 5Gi
        storageClassName: netapp-file-backup
    config:
      requests:
        cpu: 50m
        memory: 128Mi
    repoHost:
      requests:
        cpu: 100m
        memory: 512Mi
    sidecars:
      requests:
        cpu: 10m
        memory: 64Mi
    jobs:
      requests:
        cpu: 100m
        memory: 512Mi
  patroni:
    postgresql:
      pg_hba:
        - 'host all all 0.0.0.0/0 scram-sha-256'
        - 'host all all ::1/128 scram-sha-256'
      parameters:
        shared_buffers: 1GB # 25% of the 4Gi memory allocated to the pod
        wal_buffers: '-1' # automatically set to 1/32 of shared_buffers (32MB)
        min_wal_size: 128MB
        max_wal_size: 512MB # increased from 64MB for better write performance
        max_slot_wal_keep_size: 1GB # increased to prevent replica lag issues
        work_mem: 32MB # increased from 2MB for better query performance
        log_min_duration_statement: 1000ms # log queries that take longer than 1 second to respond
        effective_io_concurrency: 20 # if the underlying disk can handle multiple simultaneous requests, increase this value and test what gives the best application performance; all BCGov clusters use SSDs
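        # Worked arithmetic behind the memory settings above, grounded in the instance's
        # 4Gi memory request (these restate the configured values, not new tuning advice):
        #   shared_buffers = 4Gi * 25% = 1GB
        #   wal_buffers    = shared_buffers / 32 = 1GB / 32 = 32MB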
  proxy:
    enabled: true
    pgBouncer:
      image: # no need to specify an image; the default from the Crunchy Postgres Operator is pulled automatically
      replicas: 2
      requests:
        cpu: 50m
        memory: 128Mi
      maxConnections: 100 # keep this below the postgres max_connections setting
      poolMode: 'transaction'
  # Postgres Cluster resource values:
  pgmonitor:
    enabled: true
    exporter:
      image: # no need to specify an image; the default from the Crunchy Postgres Operator is pulled automatically
      requests:
        cpu: 25m
        memory: 64Mi
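# A hypothetical usage sketch; the release name, chart path, and namespace flag are
# assumptions, not part of this file or its documented pipeline:
#   helm upgrade --install crunchy ./charts/crunchy -f values.yml -n <namespace>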