-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathtest_volume.py
381 lines (270 loc) · 11.8 KB
/
test_volume.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
"""
Flexible Volume Management
==========================
cloudscale.ch offers flexible volume management with SSD and bulk storage
volumes that can be attached, detached, and resized.
"""
import time
import pytest
from requests.exceptions import HTTPError
from util import extract_number
from warnings import warn
# Volume sizes are measured in GiB
GiB = 1024 ** 3
MiB = 1024 ** 2
def test_attach_and_detach_volume_on_all_images(server, volume, image):
    """ Volumes can be dynamically attached and detached from servers.

    Attaches the extra volume to a running server, verifies the guest sees
    the matching /dev/disk/by-id entry, then detaches it and verifies the
    entry disappears again. Runs once per image under test.
    """
    # Attach the volume to the server (hot-plug, server stays running)
    volume.attach(server)
    # Give some time for the change to actually propagate
    time.sleep(5)
    # Virtio block device serial numbers contain at least the first 20 bytes
    # of the Volume UUID. On newer compute hosts this may be the full UUID.
    #
    # Note: The CSI driver relies on this behavior, changes to it may require
    # an upgrade of the CSI driver.
    volume_paths = server.output_of(
        f"ls -1 /dev/disk/by-id/*{volume.uuid[:20]}*").splitlines()
    # Some images refer to the same volume twice
    # (e.g. separate by-id aliases pointing at the same disk)
    assert 1 <= len(volume_paths) <= 2
    # Check that volume is present
    assert server.file_path_exists(volume_paths[0])
    # Detach volume from server
    volume.detach()
    # Give some time for the change to actually propagate
    time.sleep(5)
    # Check that volume is no longer present (udev removes the symlink)
    assert not server.file_path_exists(volume_paths[0])
def test_expand_volume_online_on_all_images(create_server, image):
    """ On first boot, the volume size should be set to a default of 10GiB.
    It can then be live resized.
    """
    # Boot a server with the default root volume of 10 GiB
    server = create_server(image=image)

    # The same lsblk invocation reports the device size before and after
    command = 'lsblk --bytes --nodeps --noheadings --output SIZE /dev/sda'

    # The root device starts out at exactly 10 GiB
    assert server.output_of(command) == str(10 * GiB)

    # Grow the root disk to 16 GiB while the server keeps running
    server.scale_root_disk(16)

    # Give some time for the change to actually propagate
    time.sleep(5)

    # The guest must now see the enlarged device
    assert server.output_of(command) == str(16 * GiB)
def test_expand_filesystem_online_on_common_images(create_server, image):
    """ Volumes can be resized while the host is online.
    Filesystems commonly have facilities to expand to the added space on the
    block device.
    """
    # This should work with all the images we offer
    server = create_server(image=image)

    # Grow the root disk from its 10 GiB default to 16 GiB
    server.scale_root_disk(16)

    # Give some time for the change to actually propagate
    time.sleep(5)

    # Find the device backing the root filesystem, then its partition number
    root_device = server.output_of('mount | grep -w / | cut -d " " -f 1')
    root_partition = extract_number(root_device)

    # Grow the root partition on the running system
    server.assert_run(f'sudo growpart /dev/sda {root_partition}')

    # Expand the filesystem with the tool appropriate for its type
    fs_type = server.output_of(f'df --output=fstype {root_device} | tail -n 1')
    if fs_type == 'ext4':
        server.assert_run(f'sudo resize2fs {root_device}')
    elif fs_type == 'xfs':
        server.assert_run('sudo xfs_growfs /')
    else:
        raise NotImplementedError(f"No known resize command for {fs_type}")

    # Ensure that the filesystem has been resized.
    # The /boot and /boot/efi partition may take up to 1249 MiB of space.
    assert (16 * GiB - 1249 * MiB) <= server.fs_size(root_device) <= 16 * GiB
def test_expand_filesystem_on_boot_on_common_images(create_server, image):
    """ Volumes can be resized while the server is stopped.
    Filesystems commonly grow to expand to the added space on the block device
    during boot.
    """
    server = create_server(image=image)

    # Remember which device holds the root filesystem
    root_device = server.output_of('mount | grep -w / | cut -d " " -f 1')

    # Grow the root disk while the server is powered off, then boot again
    server.stop()
    server.scale_root_disk(16)
    server.start()

    # Ensure the filesystem grew to the new size during boot.
    # The /boot and /boot/efi partition may take up to 1249 MiB of space.
    assert (16 * GiB - 1249 * MiB) <= server.fs_size(root_device) <= 16 * GiB
def test_maximum_number_of_volumes(server, create_volume):
    """ It is possible to attach up to 128 additional volumes to a server.
    """
    # Bring the server up to 128 disks (the root disk is already attached)
    for _ in range(127):
        create_volume(size=10, volume_type="ssd").attach(server)

    # The guest now sees 128 disks, named 'sda' through 'sddx'
    disks = server.output_of('lsblk | grep disk').splitlines()
    assert len(disks) == 128
    assert disks[0].split(' ')[0] == 'sda'
    assert disks[-1].split(' ')[0] == 'sddx'

    # Attaching a 129th volume must fail with a specific error message
    with pytest.raises(HTTPError) as error:
        create_volume(size=10, volume_type="ssd").attach(server)
    assert error.value.response.json()['detail'] == (
        "Due to internal limitations, it is currently not possible "
        "to attach more than 128 volumes.")
def test_snapshot_volume_attached(server, volume):
    """ Attached volumes can be snapshotted and reverted.

    It is possible to create a snapshot of a volume which is currently attached
    and to revert the volume back to this state.

    Snapshots of volumes taken while they are attached and mounted are crash
    consistent. Some in-flight data might not be in the snapshot, but the
    volume can always be recovered to a consistent state.
    """
    # Attach volume to server and format
    volume.attach(server)
    time.sleep(5)
    server.assert_run('sudo mkfs.ext4 /dev/sdb')
    server.assert_run('sudo mount /dev/sdb /mnt')
    # Create two files. The first is synced to disk with fsync, the second
    # is not synced. Data might still be in-flight.
    server.assert_run(
        'sudo dd if=/dev/zero of=/mnt/synced count=1 bs=1M conv=fsync')
    server.assert_run(
        'sudo dd if=/dev/zero of=/mnt/not-synced count=1 bs=1M')
    # Create snapshot (volume is still attached and mounted)
    snapshot = volume.snapshot('snap')
    # Write test file to volume (must NOT survive the revert)
    server.assert_run('sudo touch /mnt/after-snapshot')
    # Try reverting while attached (should fail)
    with pytest.raises(HTTPError) as error:
        volume.revert(snapshot)
    # Assert a HTTP 400 BadRequest response with a specific error message
    assert error.value.response.status_code == 400
    assert error.value.response.json()['detail'] == (
        'Cannot revert non-root volumes while they are attached to a server.'
    )
    # Detach volume (unmount first so the filesystem is consistent)
    server.assert_run('sudo umount /mnt')
    volume.detach()
    time.sleep(5)
    # Revert volume to snapshot (allowed now that it is detached)
    volume.revert(snapshot)
    # Reattach and mount the volume
    volume.attach(server)
    time.sleep(5)
    server.assert_run('sudo mount /dev/sdb /mnt')
    # Verify the test files are in the correct state
    assert server.file_path_exists('/mnt/synced')
    assert not server.file_path_exists('/mnt/after-snapshot')
    # "Warn" if the file "not-synced" exists. This is not a failure because
    # depending on the exact timing the data might get written to disk.
    if server.file_path_exists('/mnt/not-synced'):
        warn(
            'File "not-synced" is included in the snapshot although it was '
            'not explicitly synced.',
        )
def test_snapshot_volume_detached(server, volume):
    """ Detached volumes can be snapshotted and reverted.

    It is possible to create a snapshot of a volume which is detached
    and to revert the volume back to this state.

    Snapshots taken while the volume is detached and the filesystem unmounted
    are always fully consistent. All data is written to the disk.
    """
    # Attach and format volume to server
    volume.attach(server)
    time.sleep(5)
    server.assert_run('sudo mkfs.ext4 /dev/sdb')
    server.assert_run('sudo mount /dev/sdb /mnt')
    # Create two files. The first is synced to disk with fsync, the second
    # is not synced. Data might still be in-flight.
    server.assert_run(
        'sudo dd if=/dev/zero of=/mnt/synced count=1 bs=1M conv=fsync')
    server.assert_run('sudo dd if=/dev/zero of=/mnt/not-synced count=1 bs=1M')
    # Unmount the volume (flushes all pending writes to disk)
    server.assert_run('sudo umount /mnt')
    # Record the checksum of the first 1GiB of the volume
    # (Checksumming the whole volume would take too much time.)
    sha256_before = server.output_of(
        'sudo dd if=/dev/sdb count=1 bs=1GiB 2>/dev/null | sha256sum')
    # Detach and create snapshot
    volume.detach()
    snapshot = volume.snapshot('snap')
    # Attach and mount the volume again
    volume.attach(server)
    time.sleep(5)
    server.assert_run('sudo mount /dev/sdb /mnt')
    # Write test file to volume (must NOT survive the revert)
    server.assert_run('sudo touch /mnt/after-snapshot')
    # Detach volume
    server.assert_run('sudo umount /mnt')
    volume.detach()
    time.sleep(5)
    # Revert volume to snapshot
    volume.revert(snapshot)
    # Reattach volume
    volume.attach(server)
    time.sleep(5)
    # Record the volume checksum after the revert
    sha256_after = server.output_of(
        'sudo dd if=/dev/sdb count=1 bs=1GiB 2>/dev/null | sha256sum')
    # Verify the checksums match (revert restored the volume bit-for-bit)
    assert sha256_before == sha256_after
    # Mount the volume
    server.assert_run('sudo mount /dev/sdb /mnt')
    # Verify the test files are in the correct state
    assert server.file_path_exists('/mnt/synced')
    # Because the volume was unmounted everything must be synced to disk
    assert server.file_path_exists('/mnt/not-synced')
    assert not server.file_path_exists('/mnt/after-snapshot')
def test_snapshot_root_volume(create_server):
    """ Root volumes can be snapshotted and reverted.

    It is possible to create a snapshot of a root volume and to revert it
    back to this state.

    Snapshots are crash consistent and data committed to the volume before the
    snapshot is part of the snapshot, but writes in flight can be missing.
    """
    server = create_server(image='debian-12')
    volume = server.root_volume
    # Sync everything written during boot to disk (eg. SSH host keys)
    server.assert_run('sync')
    # Create two files. The first is synced to disk with fsync, the second
    # is not synced. Data might still be in-flight.
    server.assert_run('dd if=/dev/zero of=synced count=1 bs=1M conv=fsync')
    server.assert_run('dd if=/dev/zero of=not-synced count=1 bs=1M')
    # Create snapshot (server is still running)
    snapshot = volume.snapshot('snap')
    # Write test file to volume (must NOT survive the revert)
    server.assert_run('touch after-snapshot')
    # Try reverting while the server is running (should fail)
    with pytest.raises(HTTPError) as error:
        volume.revert(snapshot)
    # Assert a HTTP 400 BadRequest response with a specific error message
    assert error.value.response.status_code == 400
    assert error.value.response.json()['detail'] == (
        'Root volumes can only be reverted if server state is "stopped".'
    )
    # Stop the server (reverting a root volume requires the stopped state)
    server.stop()
    # Revert volume to snapshot
    volume.revert(snapshot)
    # Start the server again
    server.start()
    # Verify the test files are in the correct state
    assert server.file_path_exists('synced')
    assert not server.file_path_exists('after-snapshot')
    # "Warn" if the file "not-synced" exists. This is not a failure because
    # depending on the exact timing the data might get written to disk.
    if server.file_path_exists('not-synced'):
        warn(
            'File "not-synced" is included in the snapshot although it was '
            'not explicitly synced.',
        )