[vmware] Cache images as VM templates
Upon user request, the driver can cache an image as a VM template
and reuse it to create volumes. This feature is useful when creating
many volumes in parallel from the same image.

We're not using the Cinder built-in cache functionality because we
need a few extra features:
- the built-in cache doesn't account for shards. The cache entry
  will be placed on an arbitrary backend/shard and can trigger many
  slow cross-vCenter migrations when creating volumes from it.
- the built-in cache doesn't have a periodic task for deleting
  expired cache entries
- we want to cache images only when the customer requests it

Users can request image caching when creating a volume by passing
use_image_cache='true' as a volume property (metadata), as shown
below.
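
For example, with the standard OpenStack client (the property name
comes from this change; the size, image id and volume name are
placeholders):
```
openstack volume create --size 10 \
    --image <image-id> \
    --property use_image_cache=true \
    cached-volume
```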

The feature must be enabled per backend, for example:
```
[vmware]
enable_image_cache = true
```
This will enable the image cache feature for the vmware backend.

The image templates are then stored in a folder laid out like the
volumes folder, e.g. OpenStack/Project (vmware_image_cache)/Volumes,
where {backend}_image_cache is used as the project name.
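
A minimal sketch of that naming scheme (the helper below is
illustrative, not the driver's actual code):
```
def _image_cache_folder(backend_name):
    # e.g. 'vmware' -> 'OpenStack/Project (vmware_image_cache)/Volumes'
    return 'OpenStack/Project (%s_image_cache)/Volumes' % backend_name
```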

The driver periodically deletes expired cached images. The expiry
time can be controlled via the `image_cache_age_seconds` option in
the backend configuration.

Only images smaller than the configured `image_cache_max_size_gb`
will be cached.
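
Putting the backend options together, a configuration could look
like this (the values are illustrative, not defaults):
```
[vmware]
enable_image_cache = true
# delete cached templates older than one day
image_cache_age_seconds = 86400
# cache only images up to this size
image_cache_max_size_gb = 4
```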

Change-Id: I6f5e481f6997a180a455b47abe525b93bcf9aa4e
leust committed Aug 26, 2024
1 parent d45b475 commit 8333237
Showing 5 changed files with 411 additions and 35 deletions.
2 changes: 2 additions & 0 deletions cinder/scheduler/host_manager.py
@@ -386,6 +386,8 @@ def update_from_volume_capability(self, capability, service=None):
# provisioned_capacity_gb if it is not set.
self.provisioned_capacity_gb = capability.get(
'provisioned_capacity_gb', self.allocated_capacity_gb)
self.provisioned_capacity_gb += capability.get(
'extra_provisioned_capacity_gb', 0)
self.thin_provisioning_support = capability.get(
'thin_provisioning_support', False)
self.thick_provisioning_support = capability.get(
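The hunk above folds a new `extra_provisioned_capacity_gb` stat into
each pool's provisioned capacity. Judging from the driver tests below
(100 GiB + 200 GiB of cached templates reported as 300), the
backend-side value could be computed roughly as follows; the
standalone helper is illustrative, not the driver's actual method:
```
from oslo_utils import units

def extra_provisioned_capacity_gb(cached_images):
    # Sum the disk sizes (bytes) of the cached image templates and
    # report the total in GiB as extra provisioned capacity.
    total_bytes = sum(img['disk_size'] for img in cached_images)
    return total_bytes // units.Gi
```
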
12 changes: 9 additions & 3 deletions cinder/tests/unit/scheduler/test_host_manager.py
@@ -1280,6 +1280,7 @@ def test_update_from_volume_capability_nopool(self):
'free_capacity_gb': 512,
'provisioned_capacity_gb': 512,
'reserved_percentage': 0,
'extra_provisioned_capacity_gb': 0,
'timestamp': None}

fake_backend.update_from_volume_capability(volume_capability)
@@ -1293,12 +1294,16 @@
fake_backend.pools['_pool0'].provisioned_capacity_gb)

# Test update for existing host state
volume_capability.update(dict(total_capacity_gb=1000))
volume_capability.update(dict(total_capacity_gb=1000,
extra_provisioned_capacity_gb=100))
fake_backend.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_backend.pools['_pool0'].total_capacity_gb)
self.assertEqual(512 + 100,
fake_backend.pools['_pool0'].provisioned_capacity_gb)

# Test update for existing host state with different backend name
volume_capability.update(dict(volume_backend_name='magic'))
volume_capability.update(dict(volume_backend_name='magic',
extra_provisioned_capacity_gb=0))
fake_backend.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_backend.pools['magic'].total_capacity_gb)
self.assertEqual(512, fake_backend.pools['magic'].free_capacity_gb)
@@ -1333,6 +1338,7 @@ def test_update_from_volume_capability_with_pools(self):
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'extra_provisioned_capacity_gb': 100,
'QoS_support': 'False',
'reserved_percentage': 0,
'dying_disks': 200,
@@ -1364,7 +1370,7 @@ def test_update_from_volume_capability_with_pools(self):
1024, fake_backend.pools['2nd pool'].total_capacity_gb)
self.assertEqual(1024, fake_backend.pools['2nd pool'].free_capacity_gb)
self.assertEqual(
0, fake_backend.pools['2nd pool'].provisioned_capacity_gb)
100, fake_backend.pools['2nd pool'].provisioned_capacity_gb)

capability = {
'volume_backend_name': 'Local iSCSI',
1 change: 1 addition & 0 deletions cinder/tests/unit/volume/drivers/vmware/test_fcd.py
@@ -71,6 +71,7 @@ def setUp(self):
self._config.reserved_percentage = self.RESERVED_PERCENTAGE
self._config.vmware_datastores_as_pools = False
self._config.vmware_snapshot_format = "COW"
self._config.enable_image_cache = False
self._driver = fcd.VMwareVStorageObjectDriver(
configuration=self._config)
self._driver._vc_version = self.VC_VERSION
119 changes: 115 additions & 4 deletions cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py
@@ -123,6 +123,8 @@ def setUp(self):
vmware_random_datastore_range=None,
vmware_datastores_as_pools=False,
allow_pulling_images_from_url=False,
enable_image_cache=False,
image_cache_max_size_gb=0,
)

self._db = mock.Mock()
@@ -167,20 +169,40 @@ def test_check_for_setup_error_fail(self, get_profile_id_by_name, session):
get_profile_id_by_name.assert_called_once_with(session,
self.STORAGE_PROFILE)

@mock.patch.object(VMDK_DRIVER, '_get_cached_images')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_get_volume_stats_no_pools(self, session):
def test_get_volume_stats_no_pools(self, session, mock_get_cached):
self._config.enable_image_cache = True
retr_result_mock = mock.Mock(spec=['objects'])
retr_result_mock.objects = []
session.vim.RetrievePropertiesEx.return_value = retr_result_mock
session.vim.service_content.about.instanceUuid = 'fake-service'
mock_get_cached.return_value = [
{
'name': self.IMAGE_ID,
'vm_ref': mock.sentinel.backing,
'ds_ref': mock.sentinel.datastore,
'disk_size': 100 * units.Gi,
'created_at': None
},
{
'name': self.IMAGE_ID,
'vm_ref': mock.sentinel.backing,
'ds_ref': mock.sentinel.datastore,
'disk_size': 200 * units.Gi,
'created_at': None
}
]
stats = self._driver.get_volume_stats()

mock_get_cached.assert_called_once_with()
self.assertEqual('VMware', stats['vendor_name'])
self.assertEqual(self._driver.VERSION, stats['driver_version'])
self.assertEqual('vmdk', stats['storage_protocol'])
self.assertEqual(0, stats['reserved_percentage'])
self.assertEqual(0, stats['total_capacity_gb'])
self.assertEqual(0, stats['free_capacity_gb'])
self.assertEqual(300, stats['extra_provisioned_capacity_gb'])
self.assertEqual(vmdk.LOCATION_DRIVER_NAME + ":fake-service",
stats['location_info'])

@@ -213,32 +235,52 @@ class props(object):

class result(object):
objects = [props()]

ds_obj = vim_util.get_moref("datastore-85", "Datastore")
datastores = {"datastore-85": {"summary": summary,
"storage_profile": {"name": "Gold"}}}
"storage_profile": {"name": "Gold"},
"datastore_object": ds_obj}}
return result(), datastores

@mock.patch.object(VMDK_DRIVER, '_get_cached_images')
@mock.patch('cinder.volume.drivers.vmware.datastore.'
'DatastoreSelector.is_datastore_usable')
@mock.patch.object(VMDK_DRIVER, '_collect_backend_stats')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_get_volume_stats_pools(self, session, mock_stats,
datastore_usable):
datastore_usable, mock_get_cached):
fake_result, fake_datastore_profiles = self._fake_stats_result()
mock_stats.return_value = (fake_result, fake_datastore_profiles)
datastore_usable.return_value = True
self._config.vmware_datastores_as_pools = True
self._config.enable_image_cache = True
self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config,
additional_endpoints=[],
db=self._db)
self._driver._ds_sel = mock.MagicMock()
mock_get_cached.return_value = [
{
'name': self.IMAGE_ID,
'vm_ref': mock.sentinel.backing,
'ds_ref': vim_util.get_moref("datastore-85", "Datastore"),
'disk_size': 100 * units.Gi,
'created_at': None
},
{
'name': self.IMAGE_ID,
'vm_ref': mock.sentinel.backing,
'ds_ref': vim_util.get_moref("unknown-ds", "Datastore"),
'disk_size': 200 * units.Gi,
'created_at': None
}
]

retr_result_mock = mock.Mock(spec=['objects'])
retr_result_mock.objects = []
session.vim.RetrievePropertiesEx.return_value = retr_result_mock
session.vim.service_content.about.instanceUuid = 'fake-service'
stats = self._driver.get_volume_stats()

mock_get_cached.assert_called_once_with()
self.assertEqual('VMware', stats['vendor_name'])
self.assertEqual(self._driver.VERSION, stats['driver_version'])
self.assertEqual('vmdk', stats['storage_protocol'])
@@ -247,6 +289,8 @@ def test_get_volume_stats_pools(self, session, mock_stats,
self.assertEqual(0, stats["pools"][0]['reserved_percentage'])
self.assertEqual(9313, stats["pools"][0]['total_capacity_gb'])
self.assertEqual(4657, stats["pools"][0]['free_capacity_gb'])
self.assertEqual(100,
stats["pools"][0]["extra_provisioned_capacity_gb"])
self.assertEqual('up', stats["pools"][0]['pool_state'])
self.assertEqual('up', stats["backend_state"])
self.assertFalse(stats["pools"][0]['multiattach'])
@@ -287,6 +331,7 @@ def _create_volume_dict(self,
'size': size,
'volume_attachment': attachment,
'project_id': project_id,
'metadata': {},
}

def _create_volume_obj(self,
@@ -838,6 +883,72 @@ def test_copy_image_to_volume_with_extend_backing(self, vmware_disk_type):
def test_copy_image_to_volume_with_ova_container(self):
self._test_copy_image_to_volume(container_format='ova')

@ddt.data(
({'enable_image_cache': False},
False),
({'enable_image_cache': True, 'use_image_cache': 'N/A'},
False),
({'enable_image_cache': True, 'use_image_cache': 'true',
'image_cache_max_size_gb': 1, 'image_size': 2 * units.Gi},
False),
({'enable_image_cache': True, 'use_image_cache': 'True',
'image_cache_max_size_gb': 1, 'image_size': 2 * units.Gi},
False),
({'enable_image_cache': True, 'use_image_cache': 'true',
'image_cache_max_size_gb': 2, 'image_size': 2 * units.Gi},
True)
)
@ddt.unpack
@mock.patch.object(VMDK_DRIVER, '_do_copy_image_to_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
@mock.patch.object(VMDK_DRIVER, '_create_volume_from_cached_image')
@mock.patch.object(VMDK_DRIVER,
'_get_or_create_cached_image_backing')
def test_copy_image_to_volume_cached(self, params, expected,
mock_get_cached_backing,
mock_create_from_cached,
mock_extend_backing,
mock_volumeops,
mock_do_copy_image):
self._config.enable_image_cache = (
params.get('enable_image_cache', False))
self._config.image_cache_max_size_gb = (
params.get('image_cache_max_size_gb', 0))

volume = self._create_volume_dict()

use_image_cache = params.get('use_image_cache')
if use_image_cache:
volume['metadata']['use_image_cache'] = use_image_cache

backing = mock.sentinel.backing
mock_volumeops.get_backing.return_value = backing
mock_volumeops.get_disk_size.return_value = self.VOL_SIZE * units.Gi

mock_get_cached_backing.return_value = mock.sentinel.backing

image_service = mock.Mock()
image_meta = self._create_image_meta(
size=params.get('image_size', 1 * units.Gi))
image_service.show.return_value = image_meta

self._driver.copy_image_to_volume(
mock.sentinel.context, volume, image_service, self.IMAGE_ID)

if expected:
mock_do_copy_image.assert_not_called()
mock_get_cached_backing.assert_called_once_with(
mock.sentinel.context, volume,
image_service, self.IMAGE_ID, image_meta)
mock_create_from_cached.assert_called_once_with(
volume, backing)
else:
mock_get_cached_backing.assert_not_called()
mock_do_copy_image.assert_called_once_with(
mock.sentinel.context, volume,
image_service, self.IMAGE_ID, image_meta)

@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_check_disk_conversion')