@@ -71,24 +71,52 @@ poolManager:
  # Snapshots with this suffix are considered preliminary. They are not supposed to be accessible to end-users.
  preSnapshotSuffix: "_pre"

+# Configure database containers
+databaseContainer: &db_container
+  # Database Lab provisions thin clones using Docker containers and uses auxiliary containers.
+  # We need to specify which Postgres Docker image is to be used for that.
+  # The default is the extended Postgres image built on top of the official Postgres image
+  # (See https://postgres.ai/docs/database-lab/supported_databases).
+  # It is possible to choose any custom or official Docker image that runs Postgres. Our Dockerfile
+  # (See https://gitlab.com/postgres-ai/custom-images/-/tree/master/extended)
+  # is recommended in case customization is needed.
+  dockerImage: "postgresai/extended-postgres:13"
+
+  # Custom parameters for containers with PostgreSQL, see
+  # https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources
+  containerConfig:
+    "shm-size": 1gb
+
+# Adjust database configuration
+databaseConfigs: &db_configs
+  configs:
+    # To match production plans with Database Lab plans, set the Query Planning parameters to the same values as on production.
+    shared_buffers: 1GB
+    # shared_preload_libraries – copy the value from the source.
+    # When adding shared preload libraries, make sure that "pg_stat_statements", "auto_explain", and "logerrors" are in the list.
+    # They are necessary to perform query and DB migration analysis.
+    shared_preload_libraries: "pg_stat_statements, auto_explain, logerrors"
+    # work_mem and all the Query Planning parameters – copy the values from the source.
+    # To do it, use this query:
+    #   select format($$%s = '%s'$$, name, setting)
+    #   from pg_settings
+    #   where
+    #     name ~ '(work_mem$|^enable_|_cost$|scan_size$|effective_cache_size|^jit)'
+    #     or name ~ '(^geqo|default_statistics_target|constraint_exclusion|cursor_tuple_fraction)'
+    #     or name ~ '(collapse_limit$|parallel|plan_cache_mode)';
+    work_mem: "100MB"
+    # ... put Query Planning parameters here
+
# Details of provisioning – where data is located,
# thin cloning method, etc.
provision:
+  <<: *db_container
  # Pool of ports for Postgres clones. Ports will be allocated sequentially,
  # starting from the lowest value. The "from" value must be less than "to".
  portPool:
    from: 6000
    to: 6100

-  # Database Lab provisions thin clones using Docker containers, we need
-  # to specify which Postgres Docker image is to be used when cloning.
-  # The default is the extended Postgres image built on top of the official Postgres image
-  # (See https://postgres.ai/docs/database-lab/supported_databases).
-  # Any custom or official Docker image that runs Postgres. Our Dockerfile
-  # (See https://gitlab.com/postgres-ai/custom-images/-/tree/master/extended)
-  # is recommended in case if customization is needed.
-  dockerImage: "postgresai/extended-postgres:13"
-
  # Use sudo for ZFS/LVM and Docker commands if the Database Lab server is running
  # outside a container. Keep it "false" (default) when running in a container.
  useSudo: false
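
The hunk above relies on standard YAML merge-key semantics: "<<: *db_container" splices every key of the mapping anchored at "&db_container" into "provision", so moving dockerImage and containerConfig out of this section is behavior-preserving. A minimal sketch of what a merge-key-aware YAML parser resolves the section to (illustrative only, not part of the diff):

# provision, after the "<<: *db_container" merge is resolved:
provision:
  dockerImage: "postgresai/extended-postgres:13"   # spliced in from &db_container
  containerConfig:                                 # spliced in from &db_container
    "shm-size": 1gb
  portPool:
    from: 6000
    to: 6100
  useSudo: false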
@@ -97,10 +125,6 @@ provision:
  # existing users to log in with old passwords.
  keepUserPasswords: false

-  # Custom parameters for clone containers (https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources).
-  containerConfig:
-    "shm-size": 1gb
-
# Data retrieval flow. This section defines both initial retrieval, and rules
# to keep the data directory in a synchronized state with the source. Both are optional:
# you may already have the data directory, so neither initial retrieval nor
@@ -145,13 +169,11 @@ retrieval:
    # Dumps PostgreSQL database from provided source.
    logicalDump:
      options:
+        <<: *db_container
        # The dump file will be automatically created on this location and then used to restore.
        # Ensure that there is enough disk space.
        dumpLocation: "/var/lib/dblab/dblab_pool/dump"

-        # The Docker image containing the tools required to get a dump.
-        dockerImage: "postgresai/extended-postgres:13"
-
        # Source of data.
        source:
          # Source types: "local", "remote", "rdsIam"
@@ -202,19 +224,16 @@ retrieval:
        #   forceInit: false
        #   # Option to adjust PostgreSQL configuration for a logical dump job.
        #   # It's useful if a dumped database contains non-standard extensions.
-        #   configs:
-        #     shared_preload_libraries: "pg_stat_statements"
+        #   <<: *db_configs

    # Restores PostgreSQL database from the provided dump. If you use this block, do not use
    # "restore" option in the "logicalDump" job.
    logicalRestore:
      options:
+        <<: *db_container
        # The location of the archive file (or directory, for a directory-format archive) to be restored.
        dumpLocation: "/var/lib/dblab/dblab_pool/dump"

-        # The Docker image containing the tools required to restore.
-        dockerImage: "postgresai/extended-postgres:13"
-
        # Use parallel jobs to restore faster.
        parallelJobs: 2

@@ -224,8 +243,7 @@ retrieval:

        # Option to adjust PostgreSQL configuration for a logical restore job.
        # It's useful if a restored database contains non-standard extensions.
-        # configs:
-        #   shared_preload_libraries: "pg_stat_statements"
+        # <<: *db_configs

        # Option for specifying the database list that must be restored.
        # By default, DLE restores all available databases.
@@ -242,42 +260,24 @@ retrieval:

    logicalSnapshot:
      options:
+        # Adjust PostgreSQL configuration
+        <<: *db_configs
+
+        # It is possible to define a pre-processing script. For example, "/tmp/scripts/custom.sh".
+        # Default: empty string (no pre-processing defined).
+        # This can be used for scrubbing (eliminating PII data), defining data masking, etc.
+        preprocessingScript: ""
+
        # Define pre-processing SQL queries for data patching. For example, "/tmp/scripts/sql".
        dataPatching:
-          # The Docker image to run data patching container.
-          dockerImage: "postgresai/extended-postgres:13"
-
+          <<: *db_container
          queryPreprocessing:
            # Path to SQL pre-processing queries. Default: empty string (no pre-processing defined).
            queryPath: ""

            # Worker limit for parallel queries.
            maxParallelWorkers: 2

-        # It is possible to define a pre-precessing script. For example, "/tmp/scripts/custom.sh".
-        # Default: empty string (no pre-processing defined).
-        # This can be used for scrubbing eliminating PII data, to define data masking, etc.
-        preprocessingScript: ""
-
-        # Adjust PostgreSQL configuration
-        configs:
-          # In order to match production plans with Database Lab plans set parameters related to Query Planning as on production.
-          shared_buffers: 1GB
-          # shared_preload_libraries – copy the value from the source
-          # Adding shared preload libraries, make sure that there are "pg_stat_statements, auto_explain, logerrors" in the list.
-          # It is necessary to perform query and db migration analysis.
-          shared_preload_libraries: "pg_stat_statements, auto_explain, logerrors"
-          # work_mem and all the Query Planning parameters – copy the values from the source.
-          # To do it, use this query:
-          #   select format($$%s = '%s'$$, name, setting)
-          #   from pg_settings
-          #   where
-          #     name ~ '(work_mem$|^enable_|_cost$|scan_size$|effective_cache_size|^jit)'
-          #     or name ~ '(^geqo|default_statistics_target|constraint_exclusion|cursor_tuple_fraction)'
-          #     or name ~ '(collapse_limit$|parallel|plan_cache_mode)';
-          work_mem: "100MB"
-          # ... put Query Planning parameters here
-
cloning:
  # Host that will be specified in database connection info for all clones
  # Use public IP address if database connections are allowed from outside
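
One caveat worth keeping in mind with this anchor-based refactoring: YAML merge keys are shallow, not deep. If a job merges "*db_configs" and also defines its own "configs:" key, the local mapping replaces the merged one wholesale; individual parameters are not combined. A hypothetical sketch (the work_mem override below is an invented example, not from this diff):

logicalSnapshot:
  options:
    <<: *db_configs        # brings in configs: {shared_buffers, shared_preload_libraries, work_mem}
    configs:               # an explicit local key wins entirely over the merged one
      work_mem: "200MB"    # shared_buffers etc. from *db_configs would be lost here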