Skip to content

Commit 49edc7f

Browse files
committed
test(backup): smoke tests for incremental NAS backup chain
Adds five new test cases to test_backup_recovery_nas.py covering the end-to-end behaviour of the incremental NAS backup feature: * test_incremental_chain_cadence - Sets nas.backup.full.every=3, takes 5 backups, verifies the type pattern is FULL, INC, INC, FULL, INC. * test_restore_from_incremental - FULL + 2 INCs, each with a marker file. Restores from the latest INC and verifies all three markers are present (i.e. qemu-img convert flattened the chain correctly). * test_delete_middle_incremental_repairs_chain - Builds FULL, INC1, INC2; deletes INC1 (no force needed); restores from the surviving INC2 and verifies that markers from FULL, INC1 (which was deleted), and INC2 are all present — proving the rebase merged INC1's blocks into INC2. * test_refuse_delete_full_with_children - Verifies plain delete of a FULL that has children fails, and delete with forced=true succeeds and removes the whole chain. * test_stopped_vm_falls_back_to_full - Sets cadence to 2, takes one backup (FULL), stops the VM, triggers another (cadence would say INC). Verifies the second backup is recorded as FULL because the agent fell back when backup-begin couldn't run on a stopped VM. All tests restore nas.backup.full.every to 10 in finally blocks. Refs: #12899
1 parent b8d069e commit 49edc7f

1 file changed

Lines changed: 219 additions & 0 deletions

File tree

test/integration/smoke/test_backup_recovery_nas.py

Lines changed: 219 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -265,3 +265,222 @@ def test_vm_backup_create_vm_from_backup_in_another_zone(self):
265265
self.assertEqual(backup_repository.crosszoneinstancecreation, True, "Cross-Zone Instance Creation could not be enabled on the backup repository")
266266

267267
self.vm_backup_create_vm_from_backup_int(template.id, [network.id])
268+
269+
# ------------------------------------------------------------------
270+
# Incremental backup tests (RFC #12899 / PR #13074)
271+
# ------------------------------------------------------------------
272+
# These tests exercise the incremental NAS backup chain semantics:
273+
# full -> incN cadence, restore-from-incremental, delete-middle chain
274+
# repair, refuse-delete-full-with-children, and stopped-VM fallback.
275+
#
276+
# All tests set nas.backup.full.every to a small value (3) so a chain
277+
# forms quickly without needing many backup iterations. They restore
278+
# the original value at teardown.
279+
280+
def _set_full_every(self, value):
    """Update the zone-scoped nas.backup.full.every global setting."""
    Configurations.update(self.apiclient,
                          name='nas.backup.full.every',
                          value=str(value),
                          zoneid=self.zone.id)
283+
284+
def _backup_type(self, backup):
    """Return the backup's type string ("INCREMENTAL" for chained backups),
    falling back to "FULL" when the attribute is missing or empty."""
    backup_type = getattr(backup, 'type', None)
    if not backup_type:
        return 'FULL'
    return backup_type
287+
288+
@attr(tags=["advanced", "backup"], required_hardware="true")
def test_incremental_chain_cadence(self):
    """
    With nas.backup.full.every=3, the sequence of backups should be
    FULL, INCREMENTAL, INCREMENTAL, FULL, INCREMENTAL, ...
    """
    self.backup_offering.assignOffering(self.apiclient, self.vm.id)
    self._set_full_every(3)
    try:
        ssh_client_vm = self.vm.get_ssh_client(reconnect=True)
        ssh_client_vm.execute("touch /root/incremental_marker_1.txt")

        for i in range(5):
            Backup.create(self.apiclient, self.vm.id, "inc_chain_%d" % i)
            # write a small change so the NEXT incremental has something to capture
            ssh_client_vm.execute("dd if=/dev/urandom of=/root/delta_%d bs=64k count=4 2>/dev/null" % i)
            time.sleep(2)

        # List once, after all backups are taken. (Previously Backup.list ran
        # on every iteration with all but the final result discarded, and the
        # initial `created = []` was dead.)
        created = Backup.list(self.apiclient, self.vm.id)
        self.assertEqual(len(created), 5, "Expected 5 backups after 5 Backup.create calls")
        # Sort oldest-first by date
        created.sort(key=lambda b: b.created)

        expected = ['FULL', 'INCREMENTAL', 'INCREMENTAL', 'FULL', 'INCREMENTAL']
        actual = [self._backup_type(b).upper() for b in created]
        self.assertEqual(actual, expected,
                         "With nas.backup.full.every=3, chain pattern should be %s but was %s" % (expected, actual))

        # Cleanup all backups (newest first to satisfy chain rules without forced=true)
        for b in reversed(created):
            Backup.delete(self.apiclient, b.id)
    finally:
        self._set_full_every(10)
        self.backup_offering.removeOffering(self.apiclient, self.vm.id)
323+
324+
@attr(tags=["advanced", "backup"], required_hardware="true")
def test_restore_from_incremental(self):
    """
    Take FULL + 2 INCREMENTAL backups, each with a marker file. Restore from the
    latest incremental and verify all three markers are present (chain flatten).
    """
    self.backup_offering.assignOffering(self.apiclient, self.vm.id)
    self._set_full_every(5)
    try:
        ssh_vm = self.vm.get_ssh_client(reconnect=True)

        # Drop a unique marker before each backup so the restored disk can
        # prove which links of the chain were merged in.
        markers = ("marker_full.txt", "marker_inc1.txt", "marker_inc2.txt")
        for marker, backup_name in zip(markers, ("rfi_full", "rfi_inc1", "rfi_inc2")):
            ssh_vm.execute("touch /root/%s" % marker)
            Backup.create(self.apiclient, self.vm.id, backup_name)
            time.sleep(3)

        backups = Backup.list(self.apiclient, self.vm.id)
        backups.sort(key=lambda b: b.created)
        self.assertEqual(len(backups), 3)
        self.assertEqual(self._backup_type(backups[0]).upper(), 'FULL')
        self.assertEqual(self._backup_type(backups[2]).upper(), 'INCREMENTAL')

        # Restore from the newest incremental; qemu-img must flatten the chain.
        restored_name = "vm-from-inc-" + str(int(time.time()))
        restored_vm = Backup.createVMFromBackup(self.apiclient, self.services["small"],
                                                mode=self.services["mode"],
                                                backupid=backups[2].id,
                                                vmname=restored_name,
                                                accountname=self.account.name,
                                                domainid=self.account.domainid,
                                                zoneid=self.zone.id)
        self.cleanup.append(restored_vm)

        ssh_restored = restored_vm.get_ssh_client(reconnect=True)
        for marker in markers:
            listing = ssh_restored.execute("ls /root/%s" % marker)
            self.assertIn(marker, listing[0],
                          "Restored VM should have %s (chain flattened correctly)" % marker)

        # Delete newest-first so chain rules allow plain (unforced) deletes
        for b in reversed(backups):
            Backup.delete(self.apiclient, b.id)
    finally:
        self._set_full_every(10)
        self.backup_offering.removeOffering(self.apiclient, self.vm.id)
370+
371+
@attr(tags=["advanced", "backup"], required_hardware="true")
def test_delete_middle_incremental_repairs_chain(self):
    """
    Delete a MIDDLE incremental from a FULL -> INC1 -> INC2 chain.
    The chain repair should rebase INC2 onto FULL, and the final restore
    should still produce a working VM with all expected blocks.
    """
    self.backup_offering.assignOffering(self.apiclient, self.vm.id)
    self._set_full_every(5)
    try:
        ssh_vm = self.vm.get_ssh_client(reconnect=True)

        # marker file -> backup name, one pair per link of the chain
        chain = (("dmi_full.txt", "dmi_full"),
                 ("dmi_inc1.txt", "dmi_inc1"),
                 ("dmi_inc2.txt", "dmi_inc2"))
        for marker, backup_name in chain:
            ssh_vm.execute("touch /root/%s" % marker)
            Backup.create(self.apiclient, self.vm.id, backup_name)
            time.sleep(3)

        backups = Backup.list(self.apiclient, self.vm.id)
        backups.sort(key=lambda b: b.created)
        full, inc1, inc2 = backups[0], backups[1], backups[2]

        # Delete the middle incremental — should succeed via chain repair (no force needed)
        Backup.delete(self.apiclient, inc1.id)
        remaining = Backup.list(self.apiclient, self.vm.id)
        self.assertEqual(len(remaining), 2, "After deleting middle inc, two backups should remain")

        # Restore from the remaining tail (formerly inc2) — must still produce a usable VM
        restored_name = "vm-after-mid-del-" + str(int(time.time()))
        restored_vm = Backup.createVMFromBackup(self.apiclient, self.services["small"],
                                                mode=self.services["mode"],
                                                backupid=inc2.id,
                                                vmname=restored_name,
                                                accountname=self.account.name,
                                                domainid=self.account.domainid,
                                                zoneid=self.zone.id)
        self.cleanup.append(restored_vm)

        ssh_restored = restored_vm.get_ssh_client(reconnect=True)
        # Both the FULL marker and (importantly) the deleted-INC1 marker should still
        # be present, because the rebase merged INC1's blocks into INC2.
        for marker, _ in chain:
            listing = ssh_restored.execute("ls /root/%s" % marker)
            self.assertIn(marker, listing[0],
                          "After mid-incremental delete and rebase, %s should still be restorable" % marker)

        Backup.delete(self.apiclient, inc2.id)
        Backup.delete(self.apiclient, full.id)
    finally:
        self._set_full_every(10)
        self.backup_offering.removeOffering(self.apiclient, self.vm.id)
421+
422+
@attr(tags=["advanced", "backup"], required_hardware="true")
def test_refuse_delete_full_with_children(self):
    """
    Deleting a FULL that has surviving incrementals must fail without forced=true.
    With forced=true it must succeed and remove the entire chain.
    """
    self.backup_offering.assignOffering(self.apiclient, self.vm.id)
    self._set_full_every(5)
    try:
        Backup.create(self.apiclient, self.vm.id, "rdc_full")
        time.sleep(3)
        Backup.create(self.apiclient, self.vm.id, "rdc_inc")
        time.sleep(3)

        backups = Backup.list(self.apiclient, self.vm.id)
        backups.sort(key=lambda b: b.created)
        full = backups[0]

        # Plain delete of a FULL with children must be refused. assertRaises
        # replaces the hand-rolled failed-flag/try/except pattern and still
        # fails the test with a clear message if no exception is raised.
        with self.assertRaises(Exception,
                               msg="Deleting a FULL with children should be refused without forced=true"):
            Backup.delete(self.apiclient, full.id)

        # Forced delete should succeed and clear the whole chain
        Backup.delete(self.apiclient, full.id, forced=True)
        remaining = Backup.list(self.apiclient, self.vm.id)
        # assertFalse instead of assertIsNone: list APIs may signal "nothing
        # left" as either None or an empty list — accept both conventions.
        self.assertFalse(remaining, "Forced delete of FULL should remove the entire chain")
    finally:
        self._set_full_every(10)
        self.backup_offering.removeOffering(self.apiclient, self.vm.id)
454+
455+
@attr(tags=["advanced", "backup"], required_hardware="true")
def test_stopped_vm_falls_back_to_full(self):
    """
    When a backup is requested while the VM is stopped, even if the chain cadence
    would call for an incremental, the agent must fall back to a full and start a
    new chain. The incrementalFallback flag should be reflected in backup.type=FULL.
    """
    self.backup_offering.assignOffering(self.apiclient, self.vm.id)
    self._set_full_every(2)  # cadence alone would make the second backup incremental
    try:
        Backup.create(self.apiclient, self.vm.id, "svf_first")
        time.sleep(3)

        # Stop the VM and trigger another backup — should fall back to FULL
        self.vm.stop(self.apiclient)
        time.sleep(5)
        Backup.create(self.apiclient, self.vm.id, "svf_second")
        time.sleep(3)

        backups = Backup.list(self.apiclient, self.vm.id)
        backups.sort(key=lambda b: b.created)
        self.assertEqual(len(backups), 2)
        self.assertEqual(self._backup_type(backups[0]).upper(), 'FULL')
        self.assertEqual(self._backup_type(backups[1]).upper(), 'FULL',
                         "Stopped-VM backup must be a FULL even when cadence would have asked for an INCREMENTAL")

        # Backup deletion does not require a running VM; newest first.
        for b in reversed(backups):
            Backup.delete(self.apiclient, b.id)
    finally:
        # Restart the VM on the cleanup path: previously it was restarted only
        # on the success path inside try, so any assertion failure (or a failed
        # backup on the stopped VM) left the VM stopped for later tests.
        self.vm.start(self.apiclient)
        self._set_full_every(10)
        self.backup_offering.removeOffering(self.apiclient, self.vm.id)

0 commit comments

Comments
 (0)