@@ -203,8 +203,10 @@ def test_exclude_unlogged_tables_1(self):
     # @unittest.skip("skip")
     def test_exclude_unlogged_tables_2(self):
         """
-        make node, create unlogged, take FULL, check
-        that unlogged was not backed up
+        1. make node, create unlogged, take FULL, DELTA, PAGE,
+        check that unlogged table files were not backed up
+        2. restore FULL, DELTA, PAGE to an empty db,
+        ensure the unlogged table exists and is empty
         """
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -220,6 +222,8 @@ def test_exclude_unlogged_tables_2(self):
         self.set_archiving(backup_dir, 'node', node)
         node.slow_start()
 
+        backup_ids = []
+
         for backup_type in ['full', 'delta', 'page']:
 
             if backup_type == 'full':
@@ -239,6 +243,8 @@ def test_exclude_unlogged_tables_2(self):
                 backup_dir, 'node', node,
                 backup_type=backup_type, options=['--stream'])
 
+            backup_ids.append(backup_id)
+
             filelist = self.get_backup_filelist(
                 backup_dir, 'node', backup_id)
 
@@ -258,96 +264,25 @@ def test_exclude_unlogged_tables_2(self):
                 rel_path + '.3', filelist,
                 "Unlogged table was not excluded")
 
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
+        # ensure restoring retrieves back only empty unlogged table
+        for backup_id in backup_ids:
+            node.stop()
+            node.cleanup()
 
-    # @unittest.skip("skip")
-    #TODO REVIEW time consuming test, but not more than 25 secs and 850M disk space :(
-    def test_exclude_unlogged_table_and_check_backup_size_unchanged(self):
-        """
-        - make backup on empty db, capture full backup stats
-        - fill unlogged table by 160M, capture "full" backup, check its size is almost the same as for empty one
-        - increase unlogged table by 160M again, check "delta" backup size increment is less than 1M
-        - increase unlogged table by 160M again, check "page" backup size increment is less than 1M
-        """
+            self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
 
-        fname = self.id().split('.')[3]
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
-            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={
-                "shared_buffers": "10MB"})
+            node.slow_start()
 
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # init full backup and stats on empty db
-        backup_id_empty = self.backup_node(
-            backup_dir, 'node', node, backup_type="full",
-            return_id=True,
-            options=["-j", "4", "--stream"]
-        )
-
-        show_pb_empty = self.show_pb(
-            backup_dir, 'node', backup_id=backup_id_empty)  # ['recovery-time']
-
-        # fill unlogged table by 400M, ensure second "full" backup is almost the same size
-        node.safe_psql(
-            "postgres",
-            "create unlogged table t_logged as select i"
-            " as id from generate_series(0,4005000) i")
-
-        backup_id_full = self.backup_node(
-            backup_dir, 'node', node, backup_type="full",
-            return_id=True,
-            options=["-j", "4", "--stream"]
-        )
-
-        show_pb_full = self.show_pb(
-            backup_dir, 'node', backup_id=backup_id_full)
-
-        self.assertTrue(show_pb_full["data-bytes"] - show_pb_empty["data-bytes"] < 1024 * 1024)
-
-        # ensure "delta" backup skips 400M increment to unlogged table
-        node.safe_psql(
-            "postgres",
-            "insert into t_logged "
-            " select from generate_series(0,4005000)")
-
-        backup_id_delta = self.backup_node(
-            backup_dir, 'node', node, backup_type="delta",
-            return_id=True,
-            options=["-j", "4", "--stream"]
-        )
-
-        show_pb_delta = self.show_pb(
-            backup_dir, 'node', backup_id=backup_id_delta)
-
-        self.assertTrue(show_pb_delta["data-bytes"] < 1024 * 1024)
-
-        # ensure "page" backup skips 400M increment to unlogged table
-        node.safe_psql(
-            "postgres",
-            "insert into t_logged "
-            " select from generate_series(0,4005000)")
-
-        backup_id_page = self.backup_node(
-            backup_dir, 'node', node, backup_type="page",
-            return_id=True,
-            options=["-j", "4", "--stream"]
-        )
-
-        show_pb_page = self.show_pb(
-            backup_dir, 'node', backup_id=backup_id_page)
-        self.assertTrue(show_pb_page["data-bytes"] < 1024 * 1024)
+            self.assertEqual(
+                node.safe_psql(
+                    'postgres',
+                    'select count(*) from test').decode('utf-8').rstrip(),
+                '0')
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
 
+
     # @unittest.skip("skip")
     def test_exclude_log_dir(self):
         """