From 0480ac6798c9369bc7963c1a6cdff5f4eaa20c47 Mon Sep 17 00:00:00 2001 From: digna-ionos Date: Wed, 23 Oct 2024 11:45:14 +0300 Subject: [PATCH] rename s3 occurrences with object storage correspondent names (#692) * rename s3 occurrences with object storage correspondent names * fix spacing for renaming * update gitbook * fix small issues like all caps, grammar, more specific doc --- README.md | 6 +-- docs/data-sources/application_loadbalancer.md | 2 +- docs/data-sources/dbaas_pgsql_backups.md | 2 +- docs/data-sources/dbaas_pgsql_cluster.md | 2 +- docs/data-sources/k8s_cluster.md | 2 +- docs/data-sources/networkloadbalancer.md | 2 +- docs/data-sources/nic.md | 2 +- docs/data-sources/s3_bucket.md | 4 +- docs/data-sources/s3_bucket_policy.md | 4 +- docs/data-sources/s3_key.md | 18 +++---- docs/data-sources/s3_object.md | 4 +- docs/data-sources/s3_objects.md | 4 +- docs/index.md | 10 ++-- docs/resources/application_loadbalancer.md | 2 +- docs/resources/autoscaling_group.md | 2 +- docs/resources/dbaas_pgsql_cluster.md | 2 +- docs/resources/k8s_cluster.md | 6 +-- docs/resources/networkloadbalancer.md | 4 +- docs/resources/nic.md | 4 +- docs/resources/s3_bucket.md | 8 +-- .../resources/s3_bucket_cors_configuration.md | 4 +- .../s3_bucket_lifecycle_configuration.md | 12 ++--- .../s3_bucket_object_lock_configuration.md | 4 +- docs/resources/s3_bucket_policy.md | 6 +-- .../s3_bucket_public_access_block.md | 4 +- ...et_server_side_encryption_configuration.md | 4 +- docs/resources/s3_bucket_versioning.md | 4 +- .../s3_bucket_website_configuration.md | 4 +- docs/resources/s3_key.md | 14 ++--- docs/resources/s3_object.md | 12 ++--- docs/resources/s3_object_copy.md | 12 ++--- docs/resources/user.md | 2 +- gitbook_docs/summary.md | 41 +++++++------- internal/acctest/acctest.go | 10 ++-- internal/envar/envar.go | 4 +- internal/framework/provider/provider.go | 42 +++++++-------- .../data_source_bucket.go | 14 ++--- .../data_source_bucket_policy.go | 14 ++--- 
.../data_source_bucket_policy_test.go | 6 +-- .../data_source_bucket_test.go | 6 +-- .../data_source_object.go | 12 ++--- .../data_source_object_test.go | 6 +-- .../data_source_objects.go | 14 ++--- .../data_source_objects_test.go | 20 +++---- .../services/objectstorage/errors.go | 31 +++++++++++ .../{s3 => objectstorage}/resource_bucket.go | 16 +++--- .../resource_bucket_cors_configuration.go | 18 +++---- ...resource_bucket_cors_configuration_test.go | 10 ++-- ...resource_bucket_lifecycle_configuration.go | 28 +++++----- ...rce_bucket_lifecycle_configuration_test.go | 10 ++-- ...source_bucket_object_lock_configuration.go | 18 +++---- ...e_bucket_object_lock_configuration_test.go | 8 +-- .../resource_bucket_policy.go | 28 +++++----- .../resource_bucket_policy_test.go | 8 +-- .../resource_bucket_public_access_block.go | 28 +++++----- ...esource_bucket_public_access_block_test.go | 8 +-- .../resource_bucket_sse_configuration.go | 18 +++---- .../resource_bucket_sse_configuration_test.go | 10 ++-- .../resource_bucket_test.go | 16 +++--- .../resource_bucket_versioning.go | 26 ++++----- .../resource_bucket_versioning_test.go | 8 +-- .../resource_bucket_website_configuration.go | 18 +++---- ...ource_bucket_website_configuration_test.go | 10 ++-- .../{s3 => objectstorage}/resource_object.go | 36 ++++++------- .../resource_object_copy.go | 34 ++++++------ .../resource_object_copy_test.go | 20 +++---- .../resource_object_test.go | 18 +++---- internal/framework/services/s3/errors.go | 31 ----------- internal/tags/tags.go | 18 +++---- ionoscloud/data_source_dbaas_pgsql_backups.go | 2 +- ionoscloud/data_source_dbaas_pgsql_cluster.go | 2 +- ionoscloud/data_source_k8s_cluster.go | 4 +- ionoscloud/data_source_s3_key.go | 12 ++--- ionoscloud/import_s3_keys_test.go | 6 +-- ionoscloud/provider.go | 12 ++--- ionoscloud/resource_dbaas_pgsql_cluster.go | 2 +- ionoscloud/resource_k8s_cluster.go | 6 +-- ionoscloud/resource_s3_key.go | 36 ++++++------- ionoscloud/resource_s3_key_test.go | 
18 +++---- services/cloudapi/flowlog/flowlog.go | 4 +- services/{s3 => objectstorage}/bucket.go | 6 +-- .../{s3 => objectstorage}/bucket_delete.go | 53 +++++++++---------- services/{s3 => objectstorage}/client.go | 18 +++---- services/{s3 => objectstorage}/cors.go | 20 +++---- services/objectstorage/errors.go | 44 +++++++++++++++ services/{s3 => objectstorage}/lifecycle.go | 38 ++++++------- services/{s3 => objectstorage}/object.go | 52 +++++++++--------- services/{s3 => objectstorage}/object_copy.go | 26 ++++----- .../object_data_source.go | 14 ++--- .../{s3 => objectstorage}/object_delete.go | 26 ++++----- services/{s3 => objectstorage}/object_lock.go | 14 ++--- services/{s3 => objectstorage}/objects.go | 2 +- .../objects_paginator.go | 12 ++--- services/{s3 => objectstorage}/paginator.go | 12 ++--- services/{s3 => objectstorage}/policy.go | 44 +++++++-------- .../public_access_block.go | 10 ++-- services/{s3 => objectstorage}/sse.go | 22 ++++---- services/{s3 => objectstorage}/tags.go | 8 +-- services/{s3 => objectstorage}/versioning.go | 22 ++++---- services/{s3 => objectstorage}/website.go | 32 +++++------ services/s3/errors.go | 44 --------------- 101 files changed, 729 insertions(+), 729 deletions(-) rename internal/framework/services/{s3 => objectstorage}/data_source_bucket.go (84%) rename internal/framework/services/{s3 => objectstorage}/data_source_bucket_policy.go (85%) rename internal/framework/services/{s3 => objectstorage}/data_source_bucket_policy_test.go (92%) rename internal/framework/services/{s3 => objectstorage}/data_source_bucket_test.go (92%) rename internal/framework/services/{s3 => objectstorage}/data_source_object.go (93%) rename internal/framework/services/{s3 => objectstorage}/data_source_object_test.go (98%) rename internal/framework/services/{s3 => objectstorage}/data_source_objects.go (89%) rename internal/framework/services/{s3 => objectstorage}/data_source_objects_test.go (95%) create mode 100644 
internal/framework/services/objectstorage/errors.go rename internal/framework/services/{s3 => objectstorage}/resource_bucket.go (92%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_cors_configuration.go (93%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_cors_configuration_test.go (97%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_lifecycle_configuration.go (87%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_lifecycle_configuration_test.go (98%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_object_lock_configuration.go (93%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_object_lock_configuration_test.go (98%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_policy.go (81%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_policy_test.go (97%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_public_access_block.go (83%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_public_access_block_test.go (96%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_sse_configuration.go (92%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_sse_configuration_test.go (94%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_test.go (97%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_versioning.go (85%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_versioning_test.go (94%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_website_configuration.go (96%) rename internal/framework/services/{s3 => objectstorage}/resource_bucket_website_configuration_test.go (97%) rename internal/framework/services/{s3 => objectstorage}/resource_object.go (87%) rename internal/framework/services/{s3 => 
objectstorage}/resource_object_copy.go (89%) rename internal/framework/services/{s3 => objectstorage}/resource_object_copy_test.go (96%) rename internal/framework/services/{s3 => objectstorage}/resource_object_test.go (98%) delete mode 100644 internal/framework/services/s3/errors.go rename services/{s3 => objectstorage}/bucket.go (97%) rename services/{s3 => objectstorage}/bucket_delete.go (64%) rename services/{s3 => objectstorage}/client.go (58%) rename services/{s3 => objectstorage}/cors.go (85%) create mode 100644 services/objectstorage/errors.go rename services/{s3 => objectstorage}/lifecycle.go (82%) rename services/{s3 => objectstorage}/object.go (93%) rename services/{s3 => objectstorage}/object_copy.go (95%) rename services/{s3 => objectstorage}/object_data_source.go (93%) rename services/{s3 => objectstorage}/object_delete.go (76%) rename services/{s3 => objectstorage}/object_lock.go (89%) rename services/{s3 => objectstorage}/objects.go (99%) rename services/{s3 => objectstorage}/objects_paginator.go (87%) rename services/{s3 => objectstorage}/paginator.go (83%) rename services/{s3 => objectstorage}/policy.go (86%) rename services/{s3 => objectstorage}/public_access_block.go (89%) rename services/{s3 => objectstorage}/sse.go (79%) rename services/{s3 => objectstorage}/tags.go (95%) rename services/{s3 => objectstorage}/versioning.go (79%) rename services/{s3 => objectstorage}/website.go (87%) delete mode 100644 services/s3/errors.go diff --git a/README.md b/README.md index e5648e553..c2400526c 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ export IONOS_USERNAME="username" export IONOS_PASSWORD="password" ``` -For managing IONOS S3 STORAGE resources you need to set the following environment variables with your credentials +For managing IONOS Object Storage resources you need to set the following environment variables with your credentials ```bash export IONOS_S3_ACCESS_KEY="accesskey" export IONOS_S3_SECRET_KEY="secretkey" @@ -104,8 +104,8 @@ 
See the [IonosCloud Provider documentation](https://registry.terraform.io/provid | `IONOS_LOG_LEVEL` | Specify the Log Level used to log messages. Possible values: Off, Debug, Trace | | `IONOS_PINNED_CERT` | Specify the SHA-256 public fingerprint here, enables certificate pinning | | `IONOS_CONTRACT_NUMBER` | Specify the contract number on which you wish to provision. Only valid for reseller accounts, for other types of accounts the header will be ignored | -| `IONOS_S3_ACCESS_KEY` | Specify the access key used to authenticate against the IONOS S3 STORAGE API | -| `IONOS_S3_SECRET_KEY` | Specify the secret key used to authenticate against the IONOS S3 STORAGE API | +| `IONOS_S3_ACCESS_KEY` | Specify the access key used to authenticate against the IONOS Object Storage API | +| `IONOS_S3_SECRET_KEY` | Specify the secret key used to authenticate against the IONOS Object Storage API | ## Certificate pinning: diff --git a/docs/data-sources/application_loadbalancer.md b/docs/data-sources/application_loadbalancer.md index d5d8aee90..f40d95de9 100644 --- a/docs/data-sources/application_loadbalancer.md +++ b/docs/data-sources/application_loadbalancer.md @@ -64,6 +64,6 @@ The following attributes are returned by the datasource: - `logging_lormat` - Specifies the format of the logs. - `flowlog` - Only 1 flow log can be configured. Only the name field can change as part of an update. Flow logs holistically capture network information such as source and destination IP addresses, source and destination ports, number of packets, amount of bytes, the start and end time of the recording, and the type of protocol – and log the extent to which your instances are being accessed. - `action` - Specifies the action to be taken when the rule is matched. Possible values: ACCEPTED, REJECTED, ALL. Immutable, forces re-creation. - - `bucket` - Specifies the S3 IONOS bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. 
+ - `bucket` - Specifies the IONOS Object Storage bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. - `direction` - Specifies the traffic direction pattern. Valid values: INGRESS, EGRESS, BIDIRECTIONAL. Immutable, forces re-creation. - `name` - Specifies the name of the flow log. \ No newline at end of file diff --git a/docs/data-sources/dbaas_pgsql_backups.md b/docs/data-sources/dbaas_pgsql_backups.md index 851008354..1d7bba00e 100644 --- a/docs/data-sources/dbaas_pgsql_backups.md +++ b/docs/data-sources/dbaas_pgsql_backups.md @@ -36,7 +36,7 @@ The following attributes are returned by the datasource: * `id` - The unique ID of the resource. * `cluster_id` - The unique ID of the cluster * `size` - The size of all base backups including the wal size in MB. - * `location` - The S3 location where the backups will be stored. + * `location` - The IONOS Object Storage location where the backups will be stored. * `version` - The PostgreSQL version this backup was created from. * `is_active` - Whether a cluster currently backs up data to this backup. * `earliest_recovery_target_time` - The oldest available timestamp to which you can restore. diff --git a/docs/data-sources/dbaas_pgsql_cluster.md b/docs/data-sources/dbaas_pgsql_cluster.md index 1ffa5b924..e124d2a0e 100644 --- a/docs/data-sources/dbaas_pgsql_cluster.md +++ b/docs/data-sources/dbaas_pgsql_cluster.md @@ -55,7 +55,7 @@ The following attributes are returned by the datasource: * `lan_id` - The LAN to connect your cluster to. * `cidr` - The IP and subnet for the database. * `location` - The physical location where the cluster will be created. This will be where all of your instances live. -* `backup_location` - The S3 location where the backups will be stored. +* `backup_location` - The IONOS Object Storage location where the backups will be stored. * `display_name` - The friendly name of your cluster. 
* `maintenance_window` - A weekly 4 hour-long window, during which maintenance might occur * `time` diff --git a/docs/data-sources/k8s_cluster.md b/docs/data-sources/k8s_cluster.md index 7a1641c32..70a04e921 100644 --- a/docs/data-sources/k8s_cluster.md +++ b/docs/data-sources/k8s_cluster.md @@ -66,7 +66,7 @@ The following attributes are returned by the datasource: * `nat_gateway_ip` - the NAT gateway IP of the cluster if the cluster is private. * `node_subnet` - the node subnet of the cluster, if the cluster is private. * `location` - this attribute is mandatory if the cluster is private. -* `s3_buckets` - list of S3 bucket configured for K8s usage +* `s3_buckets` - list of IONOS Object Storage buckets configured for K8s usage * `kube_config` - Kubernetes configuration * `config` - structured kubernetes config consisting of a list with 1 item with the following fields: * api_version - Kubernetes API Version diff --git a/docs/data-sources/networkloadbalancer.md b/docs/data-sources/networkloadbalancer.md index 4f16c1367..edc812281 100644 --- a/docs/data-sources/networkloadbalancer.md +++ b/docs/data-sources/networkloadbalancer.md @@ -53,6 +53,6 @@ The following attributes are returned by the datasource: - `logging_lormat` - Specifies the format of the logs. * `flowlog` - Only 1 flow log can be configured. Only the name field can change as part of an update. Flow logs holistically capture network information such as source and destination IP addresses, source and destination ports, number of packets, amount of bytes, the start and end time of the recording, and the type of protocol – and log the extent to which your instances are being accessed. - `action` - Specifies the action to be taken when the rule is matched. Possible values: ACCEPTED, REJECTED, ALL. Immutable, forces re-creation. 
+ - `bucket` - Specifies the IONOS Object Storage bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. - `direction` - Specifies the traffic direction pattern. Valid values: INGRESS, EGRESS, BIDIRECTIONAL. Immutable, forces re-creation. - `name` - Specifies the name of the flow log. diff --git a/docs/data-sources/nic.md b/docs/data-sources/nic.md index 05f3beeab..b0c890b78 100644 --- a/docs/data-sources/nic.md +++ b/docs/data-sources/nic.md @@ -64,6 +64,6 @@ The following attributes are returned by the datasource: * `pci_slot`- The PCI slot number of the Nic. * `flowlog` - Only 1 flow log can be configured. Only the name field can change as part of an update. Flow logs holistically capture network information such as source and destination IP addresses, source and destination ports, number of packets, amount of bytes, the start and end time of the recording, and the type of protocol – and log the extent to which your instances are being accessed. - `action` - Specifies the action to be taken when the rule is matched. Possible values: ACCEPTED, REJECTED, ALL. - - `bucket` - Specifies the S3 IONOS bucket where the flow log data will be stored. The bucket must exist. + - `bucket` - Specifies the IONOS Object Storage bucket where the flow log data will be stored. The bucket must exist. - `direction` - Specifies the traffic direction pattern. Valid values: INGRESS, EGRESS, BIDIRECTIONAL. - `name` - Specifies the name of the flow log. \ No newline at end of file diff --git a/docs/data-sources/s3_bucket.md b/docs/data-sources/s3_bucket.md index 42fd5c556..2063a782e 100644 --- a/docs/data-sources/s3_bucket.md +++ b/docs/data-sources/s3_bucket.md @@ -1,10 +1,10 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket" sidebar_current: "docs-ionoscloud-datasource-s3_bucket" description: |- - Get information about IonosCloud S3 Buckets. 
+ Get information about IONOS Object Storage Buckets. --- # ionoscloud_s3_bucket diff --git a/docs/data-sources/s3_bucket_policy.md b/docs/data-sources/s3_bucket_policy.md index 443d7075d..452d52117 100644 --- a/docs/data-sources/s3_bucket_policy.md +++ b/docs/data-sources/s3_bucket_policy.md @@ -1,10 +1,10 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_policy" sidebar_current: "docs-ionoscloud-datasource-s3_bucket_policy" description: |- - Get information about IonosCloud S3 Buckets policies. + Get information about IONOS Object Storage Buckets policies. --- # ionoscloud_s3_bucket_policy diff --git a/docs/data-sources/s3_key.md b/docs/data-sources/s3_key.md index 3658827d5..10c52de80 100644 --- a/docs/data-sources/s3_key.md +++ b/docs/data-sources/s3_key.md @@ -4,13 +4,13 @@ layout: "ionoscloud" page_title: "IonosCloud: s3_key" sidebar_current: "docs-resource-s3-key" description: |- - Get Information on a IonosCloud s3 key + Get Information on an IonosCloud Object Storage key --- # ionoscloud_s3_key -The **S3 key data source** can be used to search for and return an existing s3 key. -You can provide a string id which will be compared with provisioned s3 keys. +The **IONOS Object Storage key data source** can be used to search for and return an existing IONOS Object Storage key. +You can provide a string id which will be compared with provisioned IONOS Object Storage keys. If a single match is found, it will be returned. If your search results in multiple matches, an error will be returned. When this happens, please refine your search string so that it is specific enough to return only one result. 
@@ -18,7 +18,7 @@ When this happens, please refine your search string so that it is specific enoug ```hcl data "ionoscloud_s3_key" "example" { - id = + id = user_id = } ``` @@ -27,14 +27,14 @@ data "ionoscloud_s3_key" "example" { The following arguments are supported: -- `user_id` - (Required)[string] The UUID of the user owning the S3 Key. -- `id` - (Required) ID of the s3 key you want to search for. +- `user_id` - (Required)[string] The UUID of the user owning the IONOS Object Storage Key. +- `id` - (Required) ID of the IONOS Object Storage key you want to search for. ## Attributes Reference The following attributes are returned by the datasource: -* `id` - The id of the s3 key -* `active` - The state of the s3 key +* `id` - The id of the IONOS Object Storage key +* `active` - The state of the IONOS Object Storage key * `user_id` - The ID of the user that owns the key -* `secret_key` - (Computed)The S3 Secret key. \ No newline at end of file +* `secret_key` - (Computed)The IONOS Object Storage Secret key. \ No newline at end of file diff --git a/docs/data-sources/s3_object.md b/docs/data-sources/s3_object.md index 5415bcb1a..b7930f54f 100644 --- a/docs/data-sources/s3_object.md +++ b/docs/data-sources/s3_object.md @@ -1,10 +1,10 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_object" sidebar_current: "docs-ionoscloud-datasource-s3_object" description: |- - Get information about IonosCloud S3 Objects. + Get information about IONOS Object Storage Objects. 
--- # ionoscloud_s3_object diff --git a/docs/data-sources/s3_objects.md b/docs/data-sources/s3_objects.md index 2b18bfb81..9d532e261 100644 --- a/docs/data-sources/s3_objects.md +++ b/docs/data-sources/s3_objects.md @@ -1,10 +1,10 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_objects" sidebar_current: "docs-ionoscloud-datasource-s3_objects" description: |- - Get information about IonosCloud S3 Objects. + Get information about IONOS Object Storage Objects. --- # ionoscloud_s3_objects diff --git a/docs/index.md b/docs/index.md index 2c6c33c9c..bb103ce79 100644 --- a/docs/index.md +++ b/docs/index.md @@ -28,7 +28,7 @@ You can set the environment variables for HTTP basic authentication: export IONOS_USERNAME="username" export IONOS_PASSWORD="password" ``` -For managing IONOS S3 STORAGE resources you need to set the following environment variables with your credentials +For managing IONOS Object Storage resources you need to set the following environment variables with your credentials ```bash export IONOS_S3_ACCESS_KEY="accesskey" export IONOS_S3_SECRET_KEY="secretkey" @@ -123,9 +123,9 @@ The following arguments are supported: - `contract_number` - "To be set only for reseller accounts. Allows to run terraform on a contract number under a reseller account.", -- `s3_access_key` - Required for managing IONOS S3 STORAGE resources. +- `s3_access_key` - Required for managing IONOS Object Storage resources. -- `s3_secret_key` - Required for managing IONOS S3 STORAGE resources. +- `s3_secret_key` - Required for managing IONOS Object Storage resources. ### Environment Variables @@ -138,8 +138,8 @@ The following arguments are supported: | `IONOS_LOG_LEVEL` | Specify the Log Level used to log messages. 
Possible values: Off, Debug, Trace | | `IONOS_PINNED_CERT` | Specify the SHA-256 public fingerprint here, enables certificate pinning | | `IONOS_CONTRACT_NUMBER` | Specify the contract number on which you wish to provision. Only valid for reseller accounts, for other types of accounts the header will be ignored | -| `IONOS_S3_ACCESS_KEY` | Specify the access key used to authenticate against the IONOS S3 STORAGE API | -| `IONOS_S3_SECRET_KEY` | Specify the secret key used to authenticate against the IONOS S3 STORAGE API | +| `IONOS_S3_ACCESS_KEY` | Specify the access key used to authenticate against the IONOS Object Storage API | +| `IONOS_S3_SECRET_KEY` | Specify the secret key used to authenticate against the IONOS Object Storage API | ## Resource Timeout diff --git a/docs/resources/application_loadbalancer.md b/docs/resources/application_loadbalancer.md index 66297bc1d..7b902d9b8 100644 --- a/docs/resources/application_loadbalancer.md +++ b/docs/resources/application_loadbalancer.md @@ -61,7 +61,7 @@ The following arguments are supported: - `logging_lormat` - (Optional)[string] Specifies the format of the logs. - `flowlog` - (Optional)[list] Only 1 flow log can be configured. Only the name field can change as part of an update. Flow logs holistically capture network information such as source and destination IP addresses, source and destination ports, number of packets, amount of bytes, the start and end time of the recording, and the type of protocol – and log the extent to which your instances are being accessed. - `action` - (Required)[string] Specifies the action to be taken when the rule is matched. Possible values: ACCEPTED, REJECTED, ALL. Immutable, forces re-creation. - - `bucket` - (Required)[string] Specifies the S3 IONOS bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. + - `bucket` - (Required)[string] Specifies the IONOS Object Storage bucket where the flow log data will be stored. 
The bucket must exist. Immutable, forces re-creation. - `direction` - (Required)[string] Specifies the traffic direction pattern. Valid values: INGRESS, EGRESS, BIDIRECTIONAL. Immutable, forces re-creation. - `name` - (Required)[string] Specifies the name of the flow log. diff --git a/docs/resources/autoscaling_group.md b/docs/resources/autoscaling_group.md index 9041bd86c..d3f077d35 100644 --- a/docs/resources/autoscaling_group.md +++ b/docs/resources/autoscaling_group.md @@ -163,7 +163,7 @@ The following arguments are supported: - `type` - (Optional)[string] The type of firewall rule. If is not specified, it will take the default value INGRESS. - `flow_log` - (Optional)[list] Only 1 flow log can be configured. Only the name field can change as part of an update. Flow logs holistically capture network information such as source and destination IP addresses, source and destination ports, number of packets, amount of bytes, the start and end time of the recording, and the type of protocol – and log the extent to which your instances are being accessed. - `action` - (Required)[string] Specifies the action to be taken when the rule is matched. Possible values: ACCEPTED, REJECTED, ALL. Immutable, forces re-creation. - - `bucket` - (Required)[string] Specifies the S3 IONOS bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. + - `bucket` - (Required)[string] Specifies the IONOS Object Storage bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. - `direction` - (Required)[string] Specifies the traffic direction pattern. Valid values: INGRESS, EGRESS, BIDIRECTIONAL. Immutable, forces re-creation. - `name` - (Required)[string] Specifies the name of the flow log. 
- `target_group` - (Optional)[list] In order to link VM to ALB, target group must be provided diff --git a/docs/resources/dbaas_pgsql_cluster.md b/docs/resources/dbaas_pgsql_cluster.md index 3a2f37e06..b384bb869 100644 --- a/docs/resources/dbaas_pgsql_cluster.md +++ b/docs/resources/dbaas_pgsql_cluster.md @@ -155,7 +155,7 @@ resource "random_password" "cluster_password" { * `lan_id` - (Required)[true] The LAN to connect your cluster to. * `cidr` - (Required)[true] The IP and subnet for the database. Note the following unavailable IP ranges: 10.233.64.0/18, 10.233.0.0/18, 10.233.114.0/24. Please enter in the correct format like IP/Subnet, exp: 192.168.10.0/24. See [Private IPs](https://www.ionos.com/help/server-cloud-infrastructure/private-network/private-ip-address-ranges/) and [Configuring the network](https://docs.ionos.com/cloud/compute-engine/networks/how-tos/configure-networks). * `location` - (Required)[string] The physical location where the cluster will be created. This will be where all of your instances live. Property cannot be modified after datacenter creation. Possible values are: `de/fra`, `de/txl`, `gb/lhr`, `es/vit`, `us/ewr`, `us/las`. This attribute is immutable(disallowed in update requests). -* `backup_location` - (Optional)(Computed)[string] The S3 location where the backups will be stored. Possible values are: `de`, `eu-south-2`, `eu-central-2`. This attribute is immutable (disallowed in update requests). +* `backup_location` - (Optional)(Computed)[string] The IONOS Object Storage location where the backups will be stored. Possible values are: `de`, `eu-south-2`, `eu-central-2`. This attribute is immutable (disallowed in update requests). * `display_name` - (Required)[string] The friendly name of your cluster. 
* `maintenance_window` - (Optional)(Computed)[string] A weekly 4 hour-long window, during which maintenance might occur * `time` - (Required)[string] diff --git a/docs/resources/k8s_cluster.md b/docs/resources/k8s_cluster.md index 41b600e94..4f5e403aa 100644 --- a/docs/resources/k8s_cluster.md +++ b/docs/resources/k8s_cluster.md @@ -25,7 +25,7 @@ resource "ionoscloud_k8s_cluster" "example" { } api_subnet_allow_list = ["1.2.3.4/32"] s3_buckets { - name = "globally_unique_s3_bucket_name" + name = "globally_unique_bucket_name" } } ``` @@ -54,7 +54,7 @@ resource "ionoscloud_k8s_cluster" "example" { } api_subnet_allow_list = ["1.2.3.4/32"] s3_buckets { - name = "globally_unique_s3_bucket_name" + name = "globally_unique_bucket_name" } location = "de/fra" nat_gateway_ip = ionoscloud_ipblock.k8sip.ips[0] @@ -74,7 +74,7 @@ The following arguments are supported: - `day_of_the_week` - (Required)[string] Day of the week when maintenance is allowed - `viable_node_pool_versions` - (Computed)[list] List of versions that may be used for node pools under this cluster - `api_subnet_allow_list` - (Optional)[list] Access to the K8s API server is restricted to these CIDRs. Cluster-internal traffic is not affected by this restriction. If no allowlist is specified, access is not restricted. If an IP without subnet mask is provided, the default value will be used: 32 for IPv4 and 128 for IPv6. -- `s3_buckets` - (Optional)[list] List of S3 bucket configured for K8s usage. For now it contains only an S3 bucket used to store K8s API audit logs. +- `s3_buckets` - (Optional)[list] List of IONOS Object Storage buckets configured for K8s usage. For now it contains only an IONOS Object Storage bucket used to store K8s API audit logs. - `public` - (Optional)[boolean] Indicates if the cluster is public or private. This attribute is immutable. - `nat_gateway_ip` - (Optional)[string] The NAT gateway IP of the cluster if the cluster is private. This attribute is immutable. 
Must be a reserved IP in the same location as the cluster's location. This attribute is mandatory if the cluster is private. - `node_subnet` - (Optional)[string] The node subnet of the cluster, if the cluster is private. This attribute is optional and immutable. Must be a valid CIDR notation for an IPv4 network prefix of 16 bits length. diff --git a/docs/resources/networkloadbalancer.md b/docs/resources/networkloadbalancer.md index 858284055..72f1d7df8 100644 --- a/docs/resources/networkloadbalancer.md +++ b/docs/resources/networkloadbalancer.md @@ -65,7 +65,7 @@ resource "ionoscloud_networkloadbalancer" "example" { ``` -This will configure flowlog for ALL(rejected and accepted) ingress traffic and will log it into an existing ionos s3 bucket named `flowlog-bucket`. Any s3 compatible client can be used to create it. Adding a flowlog does not force re-creation or the nic, but changing any other field than +This will configure flowlog for ALL(rejected and accepted) ingress traffic and will log it into an existing IONOS Object Storage bucket named `flowlog-bucket`. Any s3 compatible client can be used to create it. Adding a flowlog does not force re-creation of the NIC, but changing any other field than `name` will. Deleting a flowlog will also force nic re-creation. ## Argument reference @@ -80,7 +80,7 @@ This will configure flowlog for ALL(rejected and accepted) ingress traffic and w - `datacenter_id` - (Required)[string] A Datacenter's UUID. - `flowlog` - (Optional)[list] Only 1 flow log can be configured. Only the name field can change as part of an update. Flow logs holistically capture network information such as source and destination IP addresses, source and destination ports, number of packets, amount of bytes, the start and end time of the recording, and the type of protocol – and log the extent to which your instances are being accessed. - `action` - (Required)[string] Specifies the action to be taken when the rule is matched. Possible values: ACCEPTED, REJECTED, ALL. 
Immutable, forces re-creation. - - `bucket` - (Required)[string] Specifies the S3 IONOS bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. + - `bucket` - (Required)[string] Specifies the IONOS Object Storage bucket where the flow log data will be stored. The bucket must exist. Immutable, forces re-creation. - `direction` - (Required)[string] Specifies the traffic direction pattern. Valid values: INGRESS, EGRESS, BIDIRECTIONAL. Immutable, forces re-creation. - `name` - (Required)[string] Specifies the name of the flow log. diff --git a/docs/resources/nic.md b/docs/resources/nic.md index 34d375354..e1225821f 100644 --- a/docs/resources/nic.md +++ b/docs/resources/nic.md @@ -158,7 +158,7 @@ resource "ionoscloud_nic" "example" { } ``` -This will configure flowlog for accepted ingress traffic and will log it into an existing ionos s3 bucket named `flowlog-bucket`. Any s3 compatible client can be used to create it. Adding a flowlog does not force re-creation of the NIC, but changing any other field than +This will configure flowlog for accepted ingress traffic and will log it into an existing IONOS Object Storage bucket named `flowlog-bucket`. Any s3 compatible client can be used to create it. Adding a flowlog does not force re-creation of the NIC, but changing any other field than `name` will. Deleting a flowlog will also force NIC re-creation. ## Argument reference @@ -180,7 +180,7 @@ This will configure flowlog for accepted ingress traffic and will log it into an * `pci_slot`- (Computed) The PCI slot number of the Nic. * `flowlog` - (Optional) Only 1 flow log can be configured. Only the name field can change as part of an update. Flow logs holistically capture network information such as source and destination IP addresses, source and destination ports, number of packets, amount of bytes, the start and end time of the recording, and the type of protocol – and log the extent to which your instances are being accessed. 
- `action` - (Required) Specifies the action to be taken when the rule is matched. Possible values: ACCEPTED, REJECTED, ALL. Immutable, update forces re-creation. - - `bucket` - (Required) Specifies the S3 IONOS bucket where the flow log data will be stored. The bucket must exist. Immutable, update forces re-creation. + - `bucket` - (Required) Specifies the IONOS Object Storage bucket where the flow log data will be stored. The bucket must exist. Immutable, update forces re-creation. - `direction` - (Required) Specifies the traffic direction pattern. Valid values: INGRESS, EGRESS, BIDIRECTIONAL. Immutable, update forces re-creation. - `name` - (Required) Specifies the name of the flow log. diff --git a/docs/resources/s3_bucket.md b/docs/resources/s3_bucket.md index 3aee589bd..189aaab5e 100644 --- a/docs/resources/s3_bucket.md +++ b/docs/resources/s3_bucket.md @@ -1,15 +1,15 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket" sidebar_current: "docs-resource-s3_bucket" description: |- - Creates and manages IonosCloud S3 Buckets. + Creates and manages IONOS Object Storage Buckets. --- # ionoscloud_s3_bucket -Manages **S3 Buckets** on IonosCloud. +Manages **IONOS Object Storage Buckets** on IonosCloud. ## Example Usage @@ -51,7 +51,7 @@ The following arguments are supported: - `id` - (Computed) Name of the bucket -⚠️ **Note:** The name must be unique across all IONOS accounts in all S3 regions. The name should adhere to the following [restrictions](https://docs.ionos.com/cloud/storage-and-backup/s3-object-storage/concepts/buckets#naming-conventions). +⚠️ **Note:** The name must be unique across all IONOS accounts in all IONOS Object Storage regions. The name should adhere to the following [restrictions](https://docs.ionos.com/cloud/storage-and-backup/s3-object-storage/concepts/buckets#naming-conventions). 
## Import diff --git a/docs/resources/s3_bucket_cors_configuration.md b/docs/resources/s3_bucket_cors_configuration.md index e4f60aef2..63a9255b3 100644 --- a/docs/resources/s3_bucket_cors_configuration.md +++ b/docs/resources/s3_bucket_cors_configuration.md @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_cors_configuration" sidebar_current: "docs-resource-s3_bucket_cors_configuration" @@ -48,7 +48,7 @@ The following arguments are supported: Days and years are mutually exclusive. You can only specify one of them. ## Import -S3 Bucket cors configuration can be imported using the `bucket` name. +IONOS Object Storage Bucket cors configuration can be imported using the `bucket` name. ```shell terraform import ionoscloud_s3_bucket_cors_configuration.example example diff --git a/docs/resources/s3_bucket_lifecycle_configuration.md b/docs/resources/s3_bucket_lifecycle_configuration.md index fa83dbf50..24dcd55d0 100644 --- a/docs/resources/s3_bucket_lifecycle_configuration.md +++ b/docs/resources/s3_bucket_lifecycle_configuration.md @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_lifecycle_configuration" sidebar_current: "docs-resource-s3_bucket_lifecycle_configuration" @@ -61,15 +61,15 @@ The following arguments are supported: - `expiration` - (Optional)[block] A lifecycle rule for when an object expires. - `days` - (Optional)[int] Specifies the number of days after object creation when the object expires. Required if 'date' is not specified. - `date` - (Optional)[string] Specifies the date after which you want the specific rule action to take effect. - - `expired_object_delete_marker` - (Optional)[bool] Indicates whether IONOS S3 Object Storage will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no operation. 
This cannot be specified with Days or Date in a Lifecycle Expiration Policy. + - `expired_object_delete_marker` - (Optional)[bool] Indicates whether IONOS Object Storage will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no operation. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. - `noncurrent_version_expiration` - (Optional)[block] A lifecycle rule for when non-current object versions expire. - - `noncurrent_days` - (Optional)[int] Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. - - `abort_incomplete_multipart_upload` - (Optional)[block] Specifies the days since the initiation of an incomplete multipart upload that IONOS S3 Object Storage will wait before permanently removing all parts of the upload. - - `days_after_initiation` - (Optional)[int] Specifies the number of days after which IONOS S3 Object Storage aborts an incomplete multipart upload. + - `noncurrent_days` - (Optional)[int] Specifies the number of days an object is noncurrent before the associated action can be performed. + - `abort_incomplete_multipart_upload` - (Optional)[block] Specifies the days since the initiation of an incomplete multipart upload that IONOS Object Storage will wait before permanently removing all parts of the upload. + - `days_after_initiation` - (Optional)[int] Specifies the number of days after which IONOS Object Storage aborts an incomplete multipart upload. ## Import -S3 Bucket lifecycle configuration can be imported using the `bucket` name. +IONOS Object Storage Bucket lifecycle configuration can be imported using the `bucket` name. 
```shell terraform import ionoscloud_s3_bucket_lifecycle_configuration.example example diff --git a/docs/resources/s3_bucket_object_lock_configuration.md b/docs/resources/s3_bucket_object_lock_configuration.md index 28ac11e28..bdcb9356f 100644 --- a/docs/resources/s3_bucket_object_lock_configuration.md +++ b/docs/resources/s3_bucket_object_lock_configuration.md @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_object_lock_configuration" sidebar_current: "docs-resource-s3_bucket_object_lock_configuration" @@ -46,7 +46,7 @@ The following arguments are supported: Days and years are mutually exclusive. You can only specify one of them. ## Import -S3 Bucket object lock configuration can be imported using the `bucket` name. +IONOS Object Storage Bucket object lock configuration can be imported using the `bucket` name. ```shell terraform import ionoscloud_s3_bucket_object_lock_configuration.example example diff --git a/docs/resources/s3_bucket_policy.md b/docs/resources/s3_bucket_policy.md index b9c2a6376..f26e518a0 100644 --- a/docs/resources/s3_bucket_policy.md +++ b/docs/resources/s3_bucket_policy.md @@ -1,15 +1,15 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_policy" sidebar_current: "docs-resource-s3_bucket_policy" description: |- - Creates and manages IonosCloud S3 Buckets policies. + Creates and manages IONOS Object Storage Bucket policies. --- # ionoscloud_s3_bucket_policy -Manages **S3 Buckets policies** on IonosCloud. +Manages **IONOS Object Storage Bucket policies** on IonosCloud. 
## Example Usage diff --git a/docs/resources/s3_bucket_public_access_block.md b/docs/resources/s3_bucket_public_access_block.md index 698e9136b..6463ff400 100644 --- a/docs/resources/s3_bucket_public_access_block.md +++ b/docs/resources/s3_bucket_public_access_block.md @@ -1,10 +1,10 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_public_access_block" sidebar_current: "docs-resource-s3_bucket_public_access_block" description: |- - Creates and manages IonosCloud S3 Public Access Block for buckets. + Creates and manages IONOS Object Storage Public Access Block for buckets. --- # ionoscloud_s3_public_access_block diff --git a/docs/resources/s3_bucket_server_side_encryption_configuration.md b/docs/resources/s3_bucket_server_side_encryption_configuration.md index 247f8d2bb..fdf7145cd 100644 --- a/docs/resources/s3_bucket_server_side_encryption_configuration.md +++ b/docs/resources/s3_bucket_server_side_encryption_configuration.md @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_server_side_encryption_configuration" sidebar_current: "docs-resource-s3_bucket_server_side_encryption_configuration" @@ -38,7 +38,7 @@ The following arguments are supported: - `sse_algorithm` - (Required)[string] Server-side encryption algorithm to use. Valid values are 'AES256' ## Import -S3 Bucket server side encryption configuration can be imported using the `bucket` name. +IONOS Object Storage Bucket server side encryption configuration can be imported using the `bucket` name. 
```shell terraform import ionoscloud_s3_bucket_server_side_encryption_configuration.example example diff --git a/docs/resources/s3_bucket_versioning.md b/docs/resources/s3_bucket_versioning.md index f04831195..bf3869aa7 100644 --- a/docs/resources/s3_bucket_versioning.md +++ b/docs/resources/s3_bucket_versioning.md @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_versioning" sidebar_current: "docs-resource-s3_bucket_versioning" @@ -37,7 +37,7 @@ The following arguments are supported: - `mfa_delete` - (Optional)[string] Specifies whether MFA delete is enabled or not. Can be `Enabled` or `Disabled`. ## Import -S3 Bucket Versioning can be imported using the `bucket` name. +IONOS Object Storage Bucket Versioning can be imported using the `bucket` name. ```shell terraform import ionoscloud_s3_bucket_versioning.example example diff --git a/docs/resources/s3_bucket_website_configuration.md b/docs/resources/s3_bucket_website_configuration.md index fb8e5af55..f60ac386c 100644 --- a/docs/resources/s3_bucket_website_configuration.md +++ b/docs/resources/s3_bucket_website_configuration.md @@ -1,5 +1,5 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_bucket_website_configuration" sidebar_current: "docs-resource-s3_bucket_website_configuration" @@ -64,7 +64,7 @@ The following arguments are supported: - `http_redirect_code` - (Optional) The HTTP redirect code to use on the response. Not required if one of the siblings is present. ## Import -S3 Bucket website configuration can be imported using the `bucket` name. +IONOS Object Storage Bucket website configuration can be imported using the `bucket` name. 
```shell terraform import ionoscloud_s3_bucket_website_configuration.example example diff --git a/docs/resources/s3_key.md b/docs/resources/s3_key.md index e4efc79b3..86788515f 100644 --- a/docs/resources/s3_key.md +++ b/docs/resources/s3_key.md @@ -4,12 +4,12 @@ layout: "ionoscloud" page_title: "IonosCloud: s3_key" sidebar_current: "docs-resource-s3-key" description: |- - Creates and manages IonosCloud S3 keys. + Creates and manages IONOS Object Storage keys. --- # ionoscloud_s3_key -Manages an **S3 Key** on IonosCloud. +Manages an **IONOS Object Storage Key** on IonosCloud. ## Example Usage @@ -33,16 +33,16 @@ resource "ionoscloud_s3_key" "example" { The following arguments are supported: -- `user_id` - (Required)[string] The UUID of the user owning the S3 Key. -- `active` - (Optional)[boolean] Whether the S3 is active / enabled or not - Please keep in mind this is only required on create. Default value in true -- `secret_key` - (Computed) The S3 Secret key. +- `user_id` - (Required)[string] The UUID of the user owning the IONOS Object Storage Key. +- `active` - (Optional)[boolean] Whether the IONOS Object Storage key is active / enabled or not - Please keep in mind this is only required on create. Default value is true +- `secret_key` - (Computed) The IONOS Object Storage Secret key. ## Import -An S3 Unit resource can be imported using its user id as well as its `resource id`, e.g. +An IONOS Object Storage Key resource can be imported using its user id as well as its `resource id`, e.g. ```shell terraform import ionoscloud_s3_key.demo {userId}/{s3KeyId} ``` -This can be helpful when you want to import S3 Keys which you have already created manually or using other means, outside of terraform. +This can be helpful when you want to import IONOS Object Storage Keys which you have already created manually or using other means, outside of terraform. 
diff --git a/docs/resources/s3_object.md b/docs/resources/s3_object.md index 36c4374c7..98b483826 100644 --- a/docs/resources/s3_object.md +++ b/docs/resources/s3_object.md @@ -1,15 +1,15 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_object" sidebar_current: "docs-resource-s3_object" description: |- - Creates and manages IonosCloud S3 Objects. + Creates and manages IONOS Object Storage Objects. --- # ionoscloud_s3_object -Manages **S3 Objects** on IonosCloud. +Manages **IONOS Object Storage Objects** on IonosCloud. ## Example Usage @@ -70,19 +70,19 @@ The following arguments are supported: - `content_language` - (Optional)[string] The natural language or languages of the intended audience for the object. - `content_type` - (Optional)[string] A standard MIME type describing the format of the contents. - `expires` - (Optional)[string] The date and time at which the object is no longer cacheable. -- `server_side_encryption` - (Optional)[string] The server-side encryption algorithm used when storing this object in IONOS S3 Object Storage. Valid value is AES256. +- `server_side_encryption` - (Optional)[string] The server-side encryption algorithm used when storing this object in IONOS Object Storage. Valid value is AES256. - `storage_class` - (Optional)[string] The storage class of the object. Valid value is STANDARD. Default is STANDARD. - `website_redirect` - (Optional)[string] Redirects requests for this object to another object in the same bucket or to an external URL. - `server_side_encryption_customer_algorithm` - (Optional)[string] Specifies the algorithm to use for encrypting the object. Valid value is AES256. - `server_side_encryption_customer_key` - (Optional)[string] Specifies the 256-bit, base64-encoded encryption key to use to encrypt and decrypt your data. - `server_side_encryption_customer_key_md5` - (Optional)[string] Specifies the 128-bit MD5 digest of the encryption key. 
-- `server_side_encryption_context` - (Optional)[string] Specifies the IONOS S3 Object Storage Encryption Context for object encryption. +- `server_side_encryption_context` - (Optional)[string] Specifies the IONOS Object Storage Encryption Context for object encryption. - `request_payer` - (Optional)[string] Confirms that the requester knows that they will be charged for the request. - `object_lock_mode` - (Optional)[string] The object lock mode that you want to apply to the object. Valid values are `GOVERNANCE` and `COMPLIANCE`. - `object_lock_retain_until_date` - (Optional)[string] The date and time when the object lock retention expires.Must be in RFC3999 format - `object_lock_legal_hold` - (Optional)[string] Indicates whether a legal hold is in effect for the object. Valid values are `ON` and `OFF`. - `etag` - (Computed)[string] An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. -- `metadata` - (Optional)[map] A map of metadata to store with the object in IONOS S3 Object Storage. Metadata keys must be lowercase alphanumeric characters. +- `metadata` - (Optional)[map] A map of metadata to store with the object in IONOS Object Storage. Metadata keys must be lowercase alphanumeric characters. - `tags` - (Optional)[map] The tag-set for the object. - `version_id` - (Computed)[string] The version of the object. - `mfa` - (Optional) [string]The concatenation of the authentication device's serial number, a space, and the value displayed on your authentication device. 
diff --git a/docs/resources/s3_object_copy.md b/docs/resources/s3_object_copy.md index 15c0f38eb..3e13605d7 100644 --- a/docs/resources/s3_object_copy.md +++ b/docs/resources/s3_object_copy.md @@ -1,15 +1,15 @@ --- -subcategory: "S3" +subcategory: "Object Storage" layout: "ionoscloud" page_title: "IonosCloud: s3_object_copy" sidebar_current: "docs-resource-s3_object_copy" description: |- - Creates a copy of an object that is already stored in IONOS S3 Object Storage. + Creates a copy of an object that is already stored in IONOS Object Storage. --- # ionoscloud_s3_object_copy -Creates a copy of an object that is already stored in IONOS S3 Object Storage. +Creates a copy of an object that is already stored in IONOS Object Storage. ## Example Usage @@ -55,13 +55,13 @@ The following arguments are supported: - `content_language` - (Optional)[string] The natural language or languages of the intended audience for the object. - `content_type` - (Optional)[string] A standard MIME type describing the format of the contents. - `expires` - (Optional)[string] The date and time at which the object is no longer cacheable. -- `server_side_encryption` - (Optional)[string] The server-side encryption algorithm used when storing this object in IONOS S3 Object Storage. Valid value is AES256. +- `server_side_encryption` - (Optional)[string] The server-side encryption algorithm used when storing this object in IONOS Object Storage. Valid value is AES256. - `storage_class` - (Optional)[string] The storage class of the object. Valid value is STANDARD. Default is STANDARD. - `website_redirect` - (Optional)[string] Redirects requests for this object to another object in the same bucket or to an external URL. - `server_side_encryption_customer_algorithm` - (Optional)[string] Specifies the algorithm to use for encrypting the object. Valid value is AES256. 
- `server_side_encryption_customer_key` - (Optional)[string] Specifies the 256-bit, base64-encoded encryption key to use to encrypt and decrypt your data. - `server_side_encryption_customer_key_md5` - (Optional)[string] Specifies the 128-bit MD5 digest of the encryption key. -- `server_side_encryption_context` - (Optional)[string] Specifies the IONOS S3 Object Storage Encryption Context for object encryption. +- `server_side_encryption_context` - (Optional)[string] Specifies the IONOS Object Storage Encryption Context for object encryption. - `source_customer_algorithm` - (Optional)[string] Specifies the algorithm used for source object encryption. Valid value is AES256. - `source_customer_key` - (Optional)[string] Specifies the 256-bit, base64-encoded encryption key for source object encryption. - `source_customer_key_md5` - (Optional)[string] Specifies the 128-bit MD5 digest of the encryption key for source object encryption. @@ -71,7 +71,7 @@ The following arguments are supported: - `etag` - (Computed)[string] An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. - `last_modified` - (Computed)[string] The date and time at which the object was last modified. - `metadata_directive` - (Optional)[string] Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request. Valid values are `COPY` and `REPLACE`. -- `metadata` - (Optional)[map] A map of metadata to store with the object in IONOS S3 Object Storage. Metadata keys must be lowercase alphanumeric characters. +- `metadata` - (Optional)[map] A map of metadata to store with the object in IONOS Object Storage. Metadata keys must be lowercase alphanumeric characters. - `tagging_directive` - (Optional)[string] Specifies whether the object tag-set is copied from the source object or replaced with tag-set provided in the request. Valid values are `COPY` and `REPLACE`. 
- `tags` - (Optional)[map] The tag-set for the object. - `version_id` - (Computed)[string] The version of the object. diff --git a/docs/resources/user.md b/docs/resources/user.md index dfcfe7ca4..43641fac8 100644 --- a/docs/resources/user.md +++ b/docs/resources/user.md @@ -65,7 +65,7 @@ resource "random_password" "user_password" { * `last_name` - (Required)[string] A last name for the user. * `password` - (Required)[string] A password for the user. * `sec_auth_active` - (Optional)[Boolean] Indicates if secure authentication is active for the user or not. *it can not be used in create requests - can be used in update* -* `s3_canonical_user_id` - (Computed) Canonical (S3) id of the user for a given identity +* `s3_canonical_user_id` - (Computed) Canonical (IONOS Object Storage) id of the user for a given identity * `active` - (Optional)[Boolean] Indicates if the user is active * `group_ids` - (Optional)[Set] The groups that this user will be a member of diff --git a/gitbook_docs/summary.md b/gitbook_docs/summary.md index 73aef3043..985c94cc4 100644 --- a/gitbook_docs/summary.md +++ b/gitbook_docs/summary.md @@ -77,13 +77,13 @@ * User Management * Resources * [Group](../docs/resources/group.md) - * [S3 Key](../docs/resources/s3_key.md) + * [Object Storage Key](../docs/resources/s3_key.md) * [Share](../docs/resources/share.md) * [User](../docs/resources/user.md) * Data Sources * [Group](../docs/data-sources/group.md) * [Resource](../docs/data-sources/resource.md) - * [S3 Key](../docs/data-sources/s3_key.md) + * [Key](../docs/data-sources/s3_key.md) * [Share](../docs/data-sources/share.md) * [User](../docs/data-sources/user.md) @@ -184,25 +184,24 @@ * [Network File Storage Cluster](../docs/data-sources/nfs_cluster.md) * [Network File Storage Share](../docs/data-sources/nfs_share.md) -* S3 - * Resources - * [S3 Bucket](../docs/resources/s3_bucket.md) - * [S3 Bucket Policy](../docs/resources/s3_bucket_policy.md) - * [S3 Object](../docs/resources/s3_object.md) - * [S3 
Bucket Public Access Block](../docs/resources/s3_bucket_public_access_block) - * [S3 Bucket Website Configuration](../docs/resources/s3_bucket_website_configuration.md) - * [S3 Bucket CORS Configuration](../docs/resources/s3_bucket_cors_configuration.md) - * [S3 Bucket Lifecycle Configuration](../docs/resources/s3_bucket_lifecycle_configuration.md) - * [S3 Bucket Object Lock Configuration](../docs/resources/s3_bucket_object_lock_configuration.md) - * [S3 Bucket Versioning Configuration](../docs/resources/s3_bucket_versioning.md) - * [S3 Bucket Server Side Encryption Configuration](../docs/resources/s3_bucket_server_side_encryption_configuration.md) - * [S3 Object Copy](../docs/resources/s3_object_copy.md) - * Data Sources - * [S3 Bucket](../docs/data-sources/s3_bucket.md) - * [S3 Bucket Policy](../docs/data-sources/s3_bucket_policy.md) - * [S3 Object](../docs/data-sources/s3_object.md) - * [S3 Bucket Public Access Block](../docs/data-sources/s3_bucket_access_block.md) - * [S3 Objects](../docs/data-sources/s3_objects.md) +* Object Storage + * Resources + * [Bucket](../docs/resources/s3_bucket.md) + * [Bucket Policy](../docs/resources/s3_bucket_policy.md) + * [Object](../docs/resources/s3_object.md) + * [Bucket Public Access Block](../docs/resources/s3_bucket_public_access_block) + * [Bucket Website Configuration](../docs/resources/s3_bucket_website_configuration.md) + * [Bucket CORS Configuration](../docs/resources/s3_bucket_cors_configuration.md) + * [Bucket Lifecycle Configuration](../docs/resources/s3_bucket_lifecycle_configuration.md) + * [Bucket Object Lock Configuration](../docs/resources/s3_bucket_object_lock_configuration.md) + * [Bucket Versioning Configuration](../docs/resources/s3_bucket_versioning.md) + * [Bucket Server Side Encryption Configuration](../docs/resources/s3_bucket_server_side_encryption_configuration.md) + * [Object Copy](../docs/resources/s3_object_copy.md) + * Data Sources + * [Bucket](../docs/data-sources/s3_bucket.md) + * [Bucket 
Policy](../docs/data-sources/s3_bucket_policy.md) + * [Object](../docs/data-sources/s3_object.md) + * [Objects](../docs/data-sources/s3_objects.md) * CDN * Resources diff --git a/internal/acctest/acctest.go b/internal/acctest/acctest.go index 229ea4020..a180d164e 100644 --- a/internal/acctest/acctest.go +++ b/internal/acctest/acctest.go @@ -13,12 +13,12 @@ import ( "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" "github.com/hashicorp/terraform-plugin-framework/providerserver" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/envar" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/framework/provider" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/ionoscloud" - s3service "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + objstorageservice "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" ) const ( @@ -91,13 +91,13 @@ func PreCheck(t *testing.T) { }) } -// S3Client returns a new S3 client for acceptance testing -func S3Client() (*s3.APIClient, error) { +// ObjectStorageClient returns a new S3 client for acceptance testing +func ObjectStorageClient() (*objstorage.APIClient, error) { accessKey := os.Getenv(envar.IonosS3AccessKey) secretKey := os.Getenv(envar.IonosS3SecretKey) if accessKey == "" || secretKey == "" { return nil, fmt.Errorf("%s and %s must be set for acceptance tests", envar.IonosS3AccessKey, envar.IonosS3SecretKey) } - return s3service.NewClient(accessKey, secretKey, "").GetBaseClient(), nil + return objstorageservice.NewClient(accessKey, secretKey, "").GetBaseClient(), nil } diff --git a/internal/envar/envar.go b/internal/envar/envar.go index 0316a3287..1c0a6883e 100644 --- a/internal/envar/envar.go +++ b/internal/envar/envar.go @@ -13,9 +13,9 @@ const ( IonosUsername = "IONOS_USERNAME" // IonosPassword is the environment variable name for the Ionos Cloud API 
password. IonosPassword = "IONOS_PASSWORD" - // IonosS3AccessKey is the environment variable name for the Ionos Cloud S3 access key. + // IonosS3AccessKey is the environment variable name for the IONOS Object Storage access key. IonosS3AccessKey = "IONOS_S3_ACCESS_KEY" - // IonosS3SecretKey is the environment variable name for the Ionos Cloud S3 secret key. + // IonosS3SecretKey is the environment variable name for the IONOS Object Storage secret key. IonosS3SecretKey = "IONOS_S3_SECRET_KEY" ) diff --git a/internal/framework/provider/provider.go b/internal/framework/provider/provider.go index 4acdf1d17..380986903 100644 --- a/internal/framework/provider/provider.go +++ b/internal/framework/provider/provider.go @@ -11,8 +11,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/framework/services/s3" - s3service "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/framework/services/objectstorage" + objstorage "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" ) // ClientOptions is the configuration for the provider. 
@@ -72,15 +72,15 @@ func (p *IonosCloudProvider) Schema(ctx context.Context, req provider.SchemaRequ }, "s3_secret_key": schema.StringAttribute{ Optional: true, - Description: "Secret key for IONOS S3 operations.", + Description: "Secret key for IONOS Object Storage operations.", }, "s3_access_key": schema.StringAttribute{ Optional: true, - Description: "Access key for IONOS S3 operations.", + Description: "Access key for IONOS Object Storage operations.", }, "s3_region": schema.StringAttribute{ Optional: true, - Description: "Region for IONOS S3 operations.", + Description: "Region for IONOS Object Storage operations.", }, }, } @@ -153,7 +153,7 @@ func (p *IonosCloudProvider) Configure(ctx context.Context, req provider.Configu return } - client := s3service.NewClient(accessKey, secretKey, region) + client := objstorage.NewClient(accessKey, secretKey, region) resp.DataSourceData = client resp.ResourceData = client } @@ -161,26 +161,26 @@ func (p *IonosCloudProvider) Configure(ctx context.Context, req provider.Configu // Resources returns the resources for the provider. 
func (p *IonosCloudProvider) Resources(_ context.Context) []func() resource.Resource { return []func() resource.Resource{ - s3.NewBucketResource, - s3.NewBucketPolicyResource, - s3.NewObjectResource, - s3.NewObjectCopyResource, - s3.NewBucketPublicAccessBlockResource, - s3.NewBucketVersioningResource, - s3.NewObjectLockConfigurationResource, - s3.NewServerSideEncryptionConfigurationResource, - s3.NewBucketCorsConfigurationResource, - s3.NewBucketLifecycleConfigurationResource, - s3.NewBucketWebsiteConfigurationResource, + objectstorage.NewBucketResource, + objectstorage.NewBucketPolicyResource, + objectstorage.NewObjectResource, + objectstorage.NewObjectCopyResource, + objectstorage.NewBucketPublicAccessBlockResource, + objectstorage.NewBucketVersioningResource, + objectstorage.NewObjectLockConfigurationResource, + objectstorage.NewServerSideEncryptionConfigurationResource, + objectstorage.NewBucketCorsConfigurationResource, + objectstorage.NewBucketLifecycleConfigurationResource, + objectstorage.NewBucketWebsiteConfigurationResource, } } // DataSources returns the data sources for the provider. 
func (p *IonosCloudProvider) DataSources(_ context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ - s3.NewBucketDataSource, - s3.NewObjectDataSource, - s3.NewBucketPolicyDataSource, - s3.NewObjectsDataSource, + objectstorage.NewBucketDataSource, + objectstorage.NewObjectDataSource, + objectstorage.NewBucketPolicyDataSource, + objectstorage.NewObjectsDataSource, } } diff --git a/internal/framework/services/s3/data_source_bucket.go b/internal/framework/services/objectstorage/data_source_bucket.go similarity index 84% rename from internal/framework/services/s3/data_source_bucket.go rename to internal/framework/services/objectstorage/data_source_bucket.go index 9f88bf0bf..ab909f41f 100644 --- a/internal/framework/services/s3/data_source_bucket.go +++ b/internal/framework/services/objectstorage/data_source_bucket.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" @@ -18,7 +18,7 @@ func NewBucketDataSource() datasource.DataSource { } type bucketDataSource struct { - client *s3.Client + client *objectstorage.Client } // Metadata returns the metadata for the data source. @@ -34,11 +34,11 @@ func (d *bucketDataSource) Configure(ctx context.Context, req datasource.Configu return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return @@ -66,11 +66,11 @@ func (d *bucketDataSource) Schema(ctx context.Context, req datasource.SchemaRequ // Read reads the data source. func (d *bucketDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { if d.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketDataSourceModel + var data *objectstorage.BucketDataSourceModel resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/data_source_bucket_policy.go b/internal/framework/services/objectstorage/data_source_bucket_policy.go similarity index 85% rename from internal/framework/services/s3/data_source_bucket_policy.go rename to internal/framework/services/objectstorage/data_source_bucket_policy.go index 0d4cc1437..f07fa4489 100644 --- a/internal/framework/services/s3/data_source_bucket_policy.go +++ b/internal/framework/services/objectstorage/data_source_bucket_policy.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes" "github.com/hashicorp/terraform-plugin-framework/datasource" @@ -19,7 +19,7 @@ func NewBucketPolicyDataSource() datasource.DataSource { } type bucketPolicyDataSource struct { - client *s3.Client + client *objectstorage.Client } // Metadata returns the metadata for the data source. 
@@ -35,11 +35,11 @@ func (d *bucketPolicyDataSource) Configure(ctx context.Context, req datasource.C return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -68,11 +68,11 @@ func (d *bucketPolicyDataSource) Schema(ctx context.Context, req datasource.Sche // Read reads the data source. func (d *bucketPolicyDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { if d.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPolicyModel + var data *objectstorage.BucketPolicyModel resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/data_source_bucket_policy_test.go b/internal/framework/services/objectstorage/data_source_bucket_policy_test.go similarity index 92% rename from internal/framework/services/s3/data_source_bucket_policy_test.go rename to internal/framework/services/objectstorage/data_source_bucket_policy_test.go index bb6dc25ae..d31ff37bf 100644 --- a/internal/framework/services/s3/data_source_bucket_policy_test.go +++ b/internal/framework/services/objectstorage/data_source_bucket_policy_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "testing" diff --git a/internal/framework/services/s3/data_source_bucket_test.go b/internal/framework/services/objectstorage/data_source_bucket_test.go similarity index 92% rename from internal/framework/services/s3/data_source_bucket_test.go rename to internal/framework/services/objectstorage/data_source_bucket_test.go index a5eabc65b..6cd421c72 100644 --- a/internal/framework/services/s3/data_source_bucket_test.go +++ b/internal/framework/services/objectstorage/data_source_bucket_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "testing" diff --git a/internal/framework/services/s3/data_source_object.go b/internal/framework/services/objectstorage/data_source_object.go similarity index 93% rename from internal/framework/services/s3/data_source_object.go rename to internal/framework/services/objectstorage/data_source_object.go index fce19c23b..54e88e559 100644 --- a/internal/framework/services/s3/data_source_object.go +++ b/internal/framework/services/objectstorage/data_source_object.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -10,7 +10,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" ) var ( @@ -23,7 +23,7 @@ func NewObjectDataSource() datasource.DataSource { } type objectDataSource struct { - client *s3.Client + client *objectstorage.Client } // Metadata returns the metadata for the object data source. @@ -39,11 +39,11 @@ func (d *objectDataSource) Configure(ctx context.Context, req datasource.Configu return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -148,7 +148,7 @@ func (d *objectDataSource) Schema(_ context.Context, req datasource.SchemaReques // Read the data source func (d *objectDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - var data *s3.ObjectDataSourceModel + var data *objectstorage.ObjectDataSourceModel // Read configuration diags := req.Config.Get(ctx, &data) diff --git a/internal/framework/services/s3/data_source_object_test.go b/internal/framework/services/objectstorage/data_source_object_test.go similarity index 98% rename from internal/framework/services/s3/data_source_object_test.go rename to internal/framework/services/objectstorage/data_source_object_test.go index ec86ff1fc..14264b1b2 100644 --- a/internal/framework/services/s3/data_source_object_test.go +++ b/internal/framework/services/objectstorage/data_source_object_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || 
objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "fmt" diff --git a/internal/framework/services/s3/data_source_objects.go b/internal/framework/services/objectstorage/data_source_objects.go similarity index 89% rename from internal/framework/services/s3/data_source_objects.go rename to internal/framework/services/objectstorage/data_source_objects.go index b74be4592..6cc790df4 100644 --- a/internal/framework/services/s3/data_source_objects.go +++ b/internal/framework/services/objectstorage/data_source_objects.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -15,10 +15,10 @@ import ( ) type objectsDataSource struct { - client *s3.Client + client *objectstorage.Client } -// NewObjectsDataSource creates a new data source for fetching objects from an S3 bucket. +// NewObjectsDataSource creates a new data source for fetching objects from a bucket. func NewObjectsDataSource() datasource.DataSource { return &objectsDataSource{} } @@ -75,11 +75,11 @@ func (d *objectsDataSource) Configure(ctx context.Context, req datasource.Config return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return @@ -89,7 +89,7 @@ func (d *objectsDataSource) Configure(ctx context.Context, req datasource.Config } func (d *objectsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - var data *s3.ObjectsDataSourceModel + var data *objectstorage.ObjectsDataSourceModel diags := req.Config.Get(ctx, &data) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { diff --git a/internal/framework/services/s3/data_source_objects_test.go b/internal/framework/services/objectstorage/data_source_objects_test.go similarity index 95% rename from internal/framework/services/s3/data_source_objects_test.go rename to internal/framework/services/objectstorage/data_source_objects_test.go index 2a04d9ac0..2c7d39bf5 100644 --- a/internal/framework/services/s3/data_source_objects_test.go +++ b/internal/framework/services/objectstorage/data_source_objects_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -12,7 +12,7 @@ import ( "testing" ) -func TestAccS3ObjectsDataSource_basic(t *testing.T) { +func TestAccObjectsDataSource_basic(t *testing.T) { rName := acctest.GenerateRandomResourceName(bucketPrefix) dataSourceName := "data.ionoscloud_s3_objects.test" @@ -35,7 +35,7 @@ func TestAccS3ObjectsDataSource_basic(t *testing.T) { }) } -func TestAccS3ObjectsDataSource_prefixes(t *testing.T) { +func TestAccObjectsDataSource_prefixes(t *testing.T) { rName := acctest.GenerateRandomResourceName(bucketPrefix) dataSourceName := "data.ionoscloud_s3_objects.test" @@ -58,7 +58,7 @@ func TestAccS3ObjectsDataSource_prefixes(t *testing.T) { }) } -func TestAccS3ObjectsDataSource_encoded(t *testing.T) { +func TestAccObjectsDataSource_encoded(t *testing.T) { rName := acctest.GenerateRandomResourceName(bucketPrefix) dataSourceName := 
"data.ionoscloud_s3_objects.test" @@ -82,7 +82,7 @@ func TestAccS3ObjectsDataSource_encoded(t *testing.T) { }) } -func TestAccS3ObjectsDataSource_maxKeysSmall(t *testing.T) { +func TestAccObjectsDataSource_maxKeysSmall(t *testing.T) { rName := acctest.GenerateRandomResourceName(bucketPrefix) dataSourceName := "data.ionoscloud_s3_objects.test" @@ -113,7 +113,7 @@ func TestAccS3ObjectsDataSource_maxKeysSmall(t *testing.T) { }) } -func TestAccS3ObjectsDataSource_maxKeysLarge(t *testing.T) { +func TestAccObjectsDataSource_maxKeysLarge(t *testing.T) { ctx := context.Background() rName := acctest.GenerateRandomResourceName(bucketPrefix) dataSourceName := "data.ionoscloud_s3_objects.test" @@ -150,7 +150,7 @@ func TestAccS3ObjectsDataSource_maxKeysLarge(t *testing.T) { }) } -func TestAccS3ObjectsDataSource_startAfter(t *testing.T) { +func TestAccObjectsDataSource_startAfter(t *testing.T) { rName := acctest.GenerateRandomResourceName(bucketPrefix) dataSourceName := "data.ionoscloud_s3_objects.test" @@ -173,7 +173,7 @@ func TestAccS3ObjectsDataSource_startAfter(t *testing.T) { }) } -func TestAccS3ObjectsDataSource_fetchOwner(t *testing.T) { +func TestAccObjectsDataSource_fetchOwner(t *testing.T) { rName := acctest.GenerateRandomResourceName(bucketPrefix) dataSourceName := "data.ionoscloud_s3_objects.test" diff --git a/internal/framework/services/objectstorage/errors.go b/internal/framework/services/objectstorage/errors.go new file mode 100644 index 000000000..92bc99cf8 --- /dev/null +++ b/internal/framework/services/objectstorage/errors.go @@ -0,0 +1,31 @@ +package objectstorage + +import ( + "errors" + "fmt" + + objstorage "github.com/ionos-cloud/sdk-go-s3" +) + +func formatXMLError(err error) error { + var apiErr objstorage.GenericOpenAPIError + if errors.As(err, &apiErr) { + if objErr, ok := apiErr.Model().(objstorage.Error); ok { + msg := "" + if objErr.Code != nil { + msg += fmt.Sprintf("code:%s\n", *objErr.Code) + } + if objErr.Message != nil { + msg += 
fmt.Sprintf("message:%s\n", *objErr.Message) + } + if objErr.HostId != nil { + msg += fmt.Sprintf("host:%s\n", *objErr.HostId) + } + if objErr.RequestId != nil { + msg += fmt.Sprintf("request:%s\n", *objErr.RequestId) + } + return errors.New(msg) + } + } + return err +} diff --git a/internal/framework/services/s3/resource_bucket.go b/internal/framework/services/objectstorage/resource_bucket.go similarity index 92% rename from internal/framework/services/s3/resource_bucket.go rename to internal/framework/services/objectstorage/resource_bucket.go index 3316a4e8c..ab556680a 100644 --- a/internal/framework/services/s3/resource_bucket.go +++ b/internal/framework/services/objectstorage/resource_bucket.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -19,7 +19,7 @@ import ( "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/tags" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils" ) @@ -35,7 +35,7 @@ func NewBucketResource() resource.Resource { } type bucketResource struct { - client *s3.Client + client *objectstorage.Client } type bucketResourceModel struct { @@ -113,11 +113,11 @@ func (r *bucketResource) Configure(_ context.Context, req resource.ConfigureRequ return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.APIClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.APIClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -129,7 +129,7 @@ func (r *bucketResource) Configure(_ context.Context, req resource.ConfigureRequ // Create creates the bucket. 
func (r *bucketResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } @@ -164,7 +164,7 @@ func (r *bucketResource) Create(ctx context.Context, req resource.CreateRequest, // Read reads the bucket. func (r *bucketResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } @@ -222,7 +222,7 @@ func (r *bucketResource) Update(ctx context.Context, req resource.UpdateRequest, // Delete deletes the bucket. func (r *bucketResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } diff --git a/internal/framework/services/s3/resource_bucket_cors_configuration.go b/internal/framework/services/objectstorage/resource_bucket_cors_configuration.go similarity index 93% rename from internal/framework/services/s3/resource_bucket_cors_configuration.go rename to internal/framework/services/objectstorage/resource_bucket_cors_configuration.go index 2e204875b..282463f32 100644 --- a/internal/framework/services/s3/resource_bucket_cors_configuration.go +++ b/internal/framework/services/objectstorage/resource_bucket_cors_configuration.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - 
"github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" @@ -23,7 +23,7 @@ var ( ) type bucketCorsConfiguration struct { - client *s3.Client + client *objectstorage.Client } // NewBucketCorsConfigurationResource creates a new resource for the bucket CORS configuration resource. @@ -102,11 +102,11 @@ func (r *bucketCorsConfiguration) Configure(_ context.Context, req resource.Conf return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.APIClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.APIClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -122,7 +122,7 @@ func (r *bucketCorsConfiguration) Create(ctx context.Context, req resource.Creat return } - var data *s3.BucketCorsConfigurationModel + var data *objectstorage.BucketCorsConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -143,7 +143,7 @@ func (r *bucketCorsConfiguration) Read(ctx context.Context, req resource.ReadReq return } - var data *s3.BucketCorsConfigurationModel + var data *objectstorage.BucketCorsConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -176,7 +176,7 @@ func (r *bucketCorsConfiguration) Update(ctx context.Context, req resource.Updat return } - var data *s3.BucketCorsConfigurationModel + var data *objectstorage.BucketCorsConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return @@ -197,7 +197,7 @@ func (r *bucketCorsConfiguration) Delete(ctx context.Context, req resource.Delet return } - var data *s3.BucketCorsConfigurationModel + var data *objectstorage.BucketCorsConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_cors_configuration_test.go b/internal/framework/services/objectstorage/resource_bucket_cors_configuration_test.go similarity index 97% rename from internal/framework/services/s3/resource_bucket_cors_configuration_test.go rename to internal/framework/services/objectstorage/resource_bucket_cors_configuration_test.go index cd228cc16..9851ad41c 100644 --- a/internal/framework/services/s3/resource_bucket_cors_configuration_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_cors_configuration_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -143,7 +143,7 @@ resource "ionoscloud_s3_bucket_cors_configuration" "test" { } func testAccCheckBucketCORSConfigurationDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -177,7 +177,7 @@ func testAccCheckCORSConfigurationExists(ctx context.Context, n string) resource return fmt.Errorf("Not Found: %s", n) } - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_lifecycle_configuration.go b/internal/framework/services/objectstorage/resource_bucket_lifecycle_configuration.go similarity index 87% rename from internal/framework/services/s3/resource_bucket_lifecycle_configuration.go rename to 
internal/framework/services/objectstorage/resource_bucket_lifecycle_configuration.go index 4e1b1c7ed..877c81fd5 100644 --- a/internal/framework/services/s3/resource_bucket_lifecycle_configuration.go +++ b/internal/framework/services/objectstorage/resource_bucket_lifecycle_configuration.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator" @@ -21,7 +21,7 @@ var ( ) type bucketLifecycleConfiguration struct { - client *s3.Client + client *objectstorage.Client } // NewBucketLifecycleConfigurationResource creates a new resource for the bucket lifecycle configuration resource. @@ -40,7 +40,7 @@ func (r *bucketLifecycleConfiguration) Schema(ctx context.Context, req resource. Attributes: map[string]schema.Attribute{ "bucket": schema.StringAttribute{ Required: true, - Description: "The name of the S3 bucket.", + Description: "The name of the bucket.", Validators: []validator.String{ stringvalidator.LengthBetween(3, 63), }, @@ -91,7 +91,7 @@ func (r *bucketLifecycleConfiguration) Schema(ctx context.Context, req resource. }, "expired_object_delete_marker": schema.BoolAttribute{ Optional: true, - Description: "Indicates whether IONOS S3 Object Storage will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no operation. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.", + Description: "Indicates whether IONOS Object Storage will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no operation. 
This cannot be specified with Days or Date in a Lifecycle Expiration Policy.", }, }, @@ -100,7 +100,7 @@ func (r *bucketLifecycleConfiguration) Schema(ctx context.Context, req resource. Attributes: map[string]schema.Attribute{ "noncurrent_days": schema.Int64Attribute{ Optional: true, - Description: "Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action.", + Description: "Specifies the number of days an object is noncurrent before IONOS Object Storage can perform the associated action.", }, }, Validators: []validator.Object{ @@ -111,13 +111,13 @@ func (r *bucketLifecycleConfiguration) Schema(ctx context.Context, req resource. Attributes: map[string]schema.Attribute{ "days_after_initiation": schema.Int64Attribute{ Optional: true, - Description: "Specifies the number of days after which IONOS S3 Object Storage aborts an incomplete multipart upload.", + Description: "Specifies the number of days after which IONOS Object Storage aborts an incomplete multipart upload.", }, }, Validators: []validator.Object{ objectvalidator.AlsoRequires(path.Expressions{path.MatchRelative().AtName("days_after_initiation")}...), }, - Description: "Specifies the days since the initiation of an incomplete multipart upload that IONOS S3 Object Storage will wait before permanently removing all parts of the upload.", + Description: "Specifies the days since the initiation of an incomplete multipart upload that IONOS Object Storage will wait before permanently removing all parts of the upload.", }, }, }, @@ -132,11 +132,11 @@ func (r *bucketLifecycleConfiguration) Configure(_ context.Context, req resource return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.APIClient, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.APIClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -152,7 +152,7 @@ func (r *bucketLifecycleConfiguration) Create(ctx context.Context, req resource. return } - var data *s3.BucketLifecycleConfigurationModel + var data *objectstorage.BucketLifecycleConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -174,7 +174,7 @@ func (r *bucketLifecycleConfiguration) Read(ctx context.Context, req resource.Re return } - var data *s3.BucketLifecycleConfigurationModel + var data *objectstorage.BucketLifecycleConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -207,7 +207,7 @@ func (r *bucketLifecycleConfiguration) Update(ctx context.Context, req resource. return } - var data *s3.BucketLifecycleConfigurationModel + var data *objectstorage.BucketLifecycleConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -228,7 +228,7 @@ func (r *bucketLifecycleConfiguration) Delete(ctx context.Context, req resource. return } - var data *s3.BucketLifecycleConfigurationModel + var data *objectstorage.BucketLifecycleConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_lifecycle_configuration_test.go b/internal/framework/services/objectstorage/resource_bucket_lifecycle_configuration_test.go similarity index 98% rename from internal/framework/services/s3/resource_bucket_lifecycle_configuration_test.go rename to internal/framework/services/objectstorage/resource_bucket_lifecycle_configuration_test.go index 539784c61..ff049ae57 100644 --- a/internal/framework/services/s3/resource_bucket_lifecycle_configuration_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_lifecycle_configuration_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -280,7 +280,7 @@ resource "ionoscloud_s3_bucket_lifecycle_configuration" "test" { } func testAccCheckBucketLifecycleConfigurationDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -314,7 +314,7 @@ func testAccCheckLifecycleConfigurationExists(ctx context.Context, n string) res return fmt.Errorf("Not Found: %s", n) } - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_object_lock_configuration.go b/internal/framework/services/objectstorage/resource_bucket_object_lock_configuration.go similarity index 93% rename from internal/framework/services/s3/resource_bucket_object_lock_configuration.go rename to internal/framework/services/objectstorage/resource_bucket_object_lock_configuration.go index 60be62247..87b029269 100644 --- a/internal/framework/services/s3/resource_bucket_object_lock_configuration.go +++ b/internal/framework/services/objectstorage/resource_bucket_object_lock_configuration.go @@ -1,10 +1,10 @@ -package s3 +package 
objectstorage import ( "context" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" @@ -25,7 +25,7 @@ var ( ) type objectLockConfiguration struct { - client *s3.Client + client *objectstorage.Client } // NewObjectLockConfigurationResource creates a new resource for the bucket object lock configuration resource. @@ -101,11 +101,11 @@ func (r *objectLockConfiguration) Configure(_ context.Context, req resource.Conf return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.APIClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.APIClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -121,7 +121,7 @@ func (r *objectLockConfiguration) Create(ctx context.Context, req resource.Creat return } - var data *s3.ObjectLockConfigurationModel + var data *objectstorage.ObjectLockConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -142,7 +142,7 @@ func (r *objectLockConfiguration) Read(ctx context.Context, req resource.ReadReq return } - var data *s3.ObjectLockConfigurationModel + var data *objectstorage.ObjectLockConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return @@ -175,7 +175,7 @@ func (r *objectLockConfiguration) Update(ctx context.Context, req resource.Updat return } - var data *s3.ObjectLockConfigurationModel + var data *objectstorage.ObjectLockConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -196,7 +196,7 @@ func (r *objectLockConfiguration) Delete(ctx context.Context, req resource.Delet return } - var data *s3.ObjectLockConfigurationModel + var data *objectstorage.ObjectLockConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_object_lock_configuration_test.go b/internal/framework/services/objectstorage/resource_bucket_object_lock_configuration_test.go similarity index 98% rename from internal/framework/services/s3/resource_bucket_object_lock_configuration_test.go rename to internal/framework/services/objectstorage/resource_bucket_object_lock_configuration_test.go index 0520898f2..b9d7e56fd 100644 --- a/internal/framework/services/s3/resource_bucket_object_lock_configuration_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_object_lock_configuration_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -194,7 +194,7 @@ resource "ionoscloud_s3_bucket_object_lock_configuration" "test" { } func testAccCheckBucketObjectLockConfigurationDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_policy.go b/internal/framework/services/objectstorage/resource_bucket_policy.go similarity index 81% rename from internal/framework/services/s3/resource_bucket_policy.go rename to 
internal/framework/services/objectstorage/resource_bucket_policy.go index 3f1455b43..34d9d2b2b 100644 --- a/internal/framework/services/s3/resource_bucket_policy.go +++ b/internal/framework/services/objectstorage/resource_bucket_policy.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" ) var ( @@ -25,7 +25,7 @@ func NewBucketPolicyResource() resource.Resource { } type bucketPolicyResource struct { - client *s3.Client + client *objectstorage.Client } // Metadata returns the metadata for the bucket policy resource. @@ -38,7 +38,7 @@ func (r *bucketPolicyResource) Schema(_ context.Context, req resource.SchemaRequ resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "bucket": schema.StringAttribute{ - Description: "Name of the S3 bucket to which this policy will be applied.", + Description: "Name of the bucket to which this policy will be applied.", Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), @@ -59,11 +59,11 @@ func (r *bucketPolicyResource) Configure(_ context.Context, req resource.Configu return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return @@ -75,11 +75,11 @@ func (r *bucketPolicyResource) Configure(_ context.Context, req resource.Configu // Create creates the bucket policy. func (r *bucketPolicyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") // todo: const for this error maybe? + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") // todo: const for this error maybe? return } - var data *s3.BucketPolicyModel + var data *objectstorage.BucketPolicyModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -96,11 +96,11 @@ func (r *bucketPolicyResource) Create(ctx context.Context, req resource.CreateRe // Read reads the bucket policy. func (r *bucketPolicyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPolicyModel + var data *objectstorage.BucketPolicyModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -130,10 +130,10 @@ func (r *bucketPolicyResource) ImportState(ctx context.Context, req resource.Imp // Update updates the bucket policy. 
func (r *bucketPolicyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPolicyModel + var data *objectstorage.BucketPolicyModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -150,11 +150,11 @@ func (r *bucketPolicyResource) Update(ctx context.Context, req resource.UpdateRe // Delete deletes the bucket. func (r *bucketPolicyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPolicyModel + var data *objectstorage.BucketPolicyModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_policy_test.go b/internal/framework/services/objectstorage/resource_bucket_policy_test.go similarity index 97% rename from internal/framework/services/s3/resource_bucket_policy_test.go rename to internal/framework/services/objectstorage/resource_bucket_policy_test.go index 2fa04651b..6100b4e96 100644 --- a/internal/framework/services/s3/resource_bucket_policy_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_policy_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -77,7 +77,7 @@ resource "ionoscloud_s3_bucket_policy" "test" { } func testAccCheckBucketPolicyDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_public_access_block.go b/internal/framework/services/objectstorage/resource_bucket_public_access_block.go similarity index 83% rename from internal/framework/services/s3/resource_bucket_public_access_block.go rename to internal/framework/services/objectstorage/resource_bucket_public_access_block.go index c96a9943d..90e69333f 100644 --- a/internal/framework/services/s3/resource_bucket_public_access_block.go +++ b/internal/framework/services/objectstorage/resource_bucket_public_access_block.go @@ -1,11 +1,11 @@ -package s3 +package objectstorage import ( "context" "errors" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/path" @@ -23,7 +23,7 @@ var ( ) // ErrBucketPublicAccessBlockNotFound returned for 404 -var 
ErrBucketPublicAccessBlockNotFound = errors.New("s3 bucket public access block not found") +var ErrBucketPublicAccessBlockNotFound = errors.New("object storage bucket public access block not found") // NewBucketPublicAccessBlockResource creates a new resource for the bucket public access block resource. func NewBucketPublicAccessBlockResource() resource.Resource { @@ -31,7 +31,7 @@ func NewBucketPublicAccessBlockResource() resource.Resource { } type bucketPublicAccessBlockResource struct { - client *s3.Client + client *objectstorage.Client } // Metadata returns the metadata for the bucket resource. @@ -80,11 +80,11 @@ func (r *bucketPublicAccessBlockResource) Configure(_ context.Context, req resou return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -96,11 +96,11 @@ func (r *bucketPublicAccessBlockResource) Configure(_ context.Context, req resou // Create creates the bucket. func (r *bucketPublicAccessBlockResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPublicAccessBlockResourceModel + var data *objectstorage.BucketPublicAccessBlockResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -117,11 +117,11 @@ func (r *bucketPublicAccessBlockResource) Create(ctx context.Context, req resour // Read reads the bucket. 
func (r *bucketPublicAccessBlockResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPublicAccessBlockResourceModel + var data *objectstorage.BucketPublicAccessBlockResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -150,11 +150,11 @@ func (r *bucketPublicAccessBlockResource) ImportState(ctx context.Context, req r // Update updates the bucket. func (r *bucketPublicAccessBlockResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPublicAccessBlockResourceModel + var data *objectstorage.BucketPublicAccessBlockResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -171,11 +171,11 @@ func (r *bucketPublicAccessBlockResource) Update(ctx context.Context, req resour // Delete deletes the bucket. func (r *bucketPublicAccessBlockResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketPublicAccessBlockResourceModel + var data *objectstorage.BucketPublicAccessBlockResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_public_access_block_test.go b/internal/framework/services/objectstorage/resource_bucket_public_access_block_test.go similarity index 96% rename from internal/framework/services/s3/resource_bucket_public_access_block_test.go rename to internal/framework/services/objectstorage/resource_bucket_public_access_block_test.go index fb5300d40..8fb3bb4a6 100644 --- a/internal/framework/services/s3/resource_bucket_public_access_block_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_public_access_block_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -57,7 +57,7 @@ func TestAccBucketPublicAccessBlockResource(t *testing.T) { } func testAccCheckBucketPublicAccessBlockDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_sse_configuration.go b/internal/framework/services/objectstorage/resource_bucket_sse_configuration.go similarity index 92% rename from internal/framework/services/s3/resource_bucket_sse_configuration.go rename to internal/framework/services/objectstorage/resource_bucket_sse_configuration.go index 2f8c31b6a..b2d3d2b21 100644 --- a/internal/framework/services/s3/resource_bucket_sse_configuration.go +++ b/internal/framework/services/objectstorage/resource_bucket_sse_configuration.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/path" @@ -21,7 
+21,7 @@ var ( ) type serverSideEncryptionConfiguration struct { - client *s3.Client + client *objectstorage.Client } // NewServerSideEncryptionConfigurationResource creates a new resource for the server side encryption configuration resource. @@ -77,11 +77,11 @@ func (r *serverSideEncryptionConfiguration) Configure(_ context.Context, req res return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -97,7 +97,7 @@ func (r *serverSideEncryptionConfiguration) Create(ctx context.Context, req reso return } - var data *s3.ServerSideEncryptionConfigurationModel + var data *objectstorage.ServerSideEncryptionConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -118,7 +118,7 @@ func (r *serverSideEncryptionConfiguration) Read(ctx context.Context, req resour return } - var data *s3.ServerSideEncryptionConfigurationModel + var data *objectstorage.ServerSideEncryptionConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -151,7 +151,7 @@ func (r *serverSideEncryptionConfiguration) Update(ctx context.Context, req reso return } - var data *s3.ServerSideEncryptionConfigurationModel + var data *objectstorage.ServerSideEncryptionConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return @@ -172,7 +172,7 @@ func (r *serverSideEncryptionConfiguration) Delete(ctx context.Context, req reso return } - var data *s3.ServerSideEncryptionConfigurationModel + var data *objectstorage.ServerSideEncryptionConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_sse_configuration_test.go b/internal/framework/services/objectstorage/resource_bucket_sse_configuration_test.go similarity index 94% rename from internal/framework/services/s3/resource_bucket_sse_configuration_test.go rename to internal/framework/services/objectstorage/resource_bucket_sse_configuration_test.go index 1dea0b19a..700fd5a02 100644 --- a/internal/framework/services/s3/resource_bucket_sse_configuration_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_sse_configuration_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -69,7 +69,7 @@ resource "ionoscloud_s3_bucket_server_side_encryption_configuration" "test" { } func testAccCheckBucketSSEConfigurationDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -103,7 +103,7 @@ func testAccCheckSSEConfigurationExists(ctx context.Context, n string) resource. 
return fmt.Errorf("Not Found: %s", n) } - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_test.go b/internal/framework/services/objectstorage/resource_bucket_test.go similarity index 97% rename from internal/framework/services/s3/resource_bucket_test.go rename to internal/framework/services/objectstorage/resource_bucket_test.go index d0574d005..bba0555e3 100644 --- a/internal/framework/services/s3/resource_bucket_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -270,7 +270,7 @@ resource "ionoscloud_s3_bucket_versioning" "bucket" { func testAccCheckBucketAddObjectsWithLegalHold(ctx context.Context, n string, keys ...string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -306,7 +306,7 @@ func testAccCheckBucketExists(ctx context.Context, n string) resource.TestCheckF return fmt.Errorf("Not Found: %s", n) } - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -317,7 +317,7 @@ func testAccCheckBucketExists(ctx context.Context, n string) resource.TestCheckF } func testAccCheckBucketDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -347,7 +347,7 @@ func testAccCheckBucketDestroy(s *terraform.State) error { func testAccCheckBucketDeleteObjects(ctx context.Context, n string, keys ...string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - client, err := 
acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -367,7 +367,7 @@ func testAccCheckBucketAddObjects(ctx context.Context, n string, keys ...string) return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_versioning.go b/internal/framework/services/objectstorage/resource_bucket_versioning.go similarity index 85% rename from internal/framework/services/s3/resource_bucket_versioning.go rename to internal/framework/services/objectstorage/resource_bucket_versioning.go index c9d5a53d6..7052aca9a 100644 --- a/internal/framework/services/s3/resource_bucket_versioning.go +++ b/internal/framework/services/objectstorage/resource_bucket_versioning.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/path" @@ -22,7 +22,7 @@ var ( ) type bucketVersioningResource struct { - client *s3.Client + client *objectstorage.Client } // NewBucketVersioningResource creates a new resource for the bucket versioning resource. @@ -75,11 +75,11 @@ func (r *bucketVersioningResource) Configure(_ context.Context, req resource.Con return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return @@ -91,11 +91,11 @@ func (r *bucketVersioningResource) Configure(_ context.Context, req resource.Con // Create creates the bucket versioning resource. func (r *bucketVersioningResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketVersioningResourceModel + var data *objectstorage.BucketVersioningResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -112,11 +112,11 @@ func (r *bucketVersioningResource) Create(ctx context.Context, req resource.Crea // Read reads the bucket versioning resource. func (r *bucketVersioningResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketVersioningResourceModel + var data *objectstorage.BucketVersioningResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -145,11 +145,11 @@ func (r *bucketVersioningResource) ImportState(ctx context.Context, req resource // Update updates the bucket versioning resource. 
func (r *bucketVersioningResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketVersioningResourceModel + var data *objectstorage.BucketVersioningResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -166,11 +166,11 @@ func (r *bucketVersioningResource) Update(ctx context.Context, req resource.Upda // Delete deletes the bucket versioning resource. func (r *bucketVersioningResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.BucketVersioningResourceModel + var data *objectstorage.BucketVersioningResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_versioning_test.go b/internal/framework/services/objectstorage/resource_bucket_versioning_test.go similarity index 94% rename from internal/framework/services/s3/resource_bucket_versioning_test.go rename to internal/framework/services/objectstorage/resource_bucket_versioning_test.go index 0f91d5832..8b9605046 100644 --- a/internal/framework/services/s3/resource_bucket_versioning_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_versioning_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -66,7 +66,7 @@ resource "ionoscloud_s3_bucket_versioning" "test" { } func testAccCheckBucketVersioningDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_bucket_website_configuration.go b/internal/framework/services/objectstorage/resource_bucket_website_configuration.go similarity index 96% rename from internal/framework/services/s3/resource_bucket_website_configuration.go rename to internal/framework/services/objectstorage/resource_bucket_website_configuration.go index 6fc1df43b..75f0d2f6d 100644 --- a/internal/framework/services/s3/resource_bucket_website_configuration.go +++ b/internal/framework/services/objectstorage/resource_bucket_website_configuration.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" 
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" @@ -23,7 +23,7 @@ var ( ) type bucketWebsiteConfiguration struct { - client *s3.Client + client *objectstorage.Client } func (r *bucketWebsiteConfiguration) ConfigValidators(ctx context.Context) []resource.ConfigValidator { @@ -161,11 +161,11 @@ func (r *bucketWebsiteConfiguration) Configure(_ context.Context, req resource.C return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -181,7 +181,7 @@ func (r *bucketWebsiteConfiguration) Create(ctx context.Context, req resource.Cr return } - var data *s3.BucketWebsiteConfigurationModel + var data *objectstorage.BucketWebsiteConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -202,7 +202,7 @@ func (r *bucketWebsiteConfiguration) Read(ctx context.Context, req resource.Read return } - var data *s3.BucketWebsiteConfigurationModel + var data *objectstorage.BucketWebsiteConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -235,7 +235,7 @@ func (r *bucketWebsiteConfiguration) Update(ctx context.Context, req resource.Up return } - var data *s3.BucketWebsiteConfigurationModel + var data *objectstorage.BucketWebsiteConfigurationModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return @@ -256,7 +256,7 @@ func (r *bucketWebsiteConfiguration) Delete(ctx context.Context, req resource.De return } - var data *s3.BucketWebsiteConfigurationModel + var data *objectstorage.BucketWebsiteConfigurationModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_bucket_website_configuration_test.go b/internal/framework/services/objectstorage/resource_bucket_website_configuration_test.go similarity index 97% rename from internal/framework/services/s3/resource_bucket_website_configuration_test.go rename to internal/framework/services/objectstorage/resource_bucket_website_configuration_test.go index de1a1c4ff..d299373ef 100644 --- a/internal/framework/services/s3/resource_bucket_website_configuration_test.go +++ b/internal/framework/services/objectstorage/resource_bucket_website_configuration_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -160,7 +160,7 @@ resource "ionoscloud_s3_bucket_website_configuration" "test" { } func testAccCheckBucketWebsiteConfigurationDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -194,7 +194,7 @@ func testAccCheckWebsiteConfigurationExists(ctx context.Context, n string) resou return fmt.Errorf("Not Found: %s", n) } - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_object.go b/internal/framework/services/objectstorage/resource_object.go similarity index 87% rename from internal/framework/services/s3/resource_object.go rename to internal/framework/services/objectstorage/resource_object.go index f1b04d5c7..3a40c5d77 100644 --- 
a/internal/framework/services/s3/resource_object.go +++ b/internal/framework/services/objectstorage/resource_object.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -19,7 +19,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" ) var ( @@ -34,7 +34,7 @@ func NewObjectResource() resource.Resource { } type objectResource struct { - client *s3.Client + client *objectstorage.Client } // Metadata returns the metadata for the object resource. @@ -96,10 +96,10 @@ func (r *objectResource) Schema(_ context.Context, req resource.SchemaRequest, r Optional: true, }, "server_side_encryption": schema.StringAttribute{ - Description: "The server-side encryption algorithm used when storing this object in IONOS S3 Object Storage (AES256).", + Description: "The server-side encryption algorithm used when storing this object in IONOS Object Storage (AES256).", Optional: true, Computed: true, - Validators: []validator.String{stringvalidator.OneOf("AES256")}, + Validators: []validator.String{stringvalidator.OneOf("", "AES256")}, }, "storage_class": schema.StringAttribute{ Description: "The storage class of the object. Valid value is 'STANDARD'.", @@ -109,7 +109,7 @@ func (r *objectResource) Schema(_ context.Context, req resource.SchemaRequest, r Validators: []validator.String{stringvalidator.OneOf("STANDARD")}, }, "website_redirect": schema.StringAttribute{ - Description: "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
IONOS S3 Object Storage stores the value of this header in the object metadata", + Description: "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. IONOS Object Storage stores the value of this header in the object metadata", Optional: true, }, "server_side_encryption_customer_algorithm": schema.StringAttribute{ @@ -122,11 +122,11 @@ func (r *objectResource) Schema(_ context.Context, req resource.SchemaRequest, r Optional: true, }, "server_side_encryption_customer_key_md5": schema.StringAttribute{ - Description: "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IONOS S3 Object Storage uses this header for a message integrity check to ensure that the encryption key was transmitted without error", + Description: "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IONOS Object Storage uses this header for a message integrity check to ensure that the encryption key was transmitted without error", Optional: true, }, "server_side_encryption_context": schema.StringAttribute{ - Description: " Specifies the IONOS S3 Object Storage Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.", + Description: " Specifies the IONOS Object Storage Encryption Context to use for object encryption. 
The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.", Optional: true, Sensitive: true, }, @@ -158,7 +158,7 @@ func (r *objectResource) Schema(_ context.Context, req resource.SchemaRequest, r ElementType: types.StringType, }, "metadata": schema.MapAttribute{ - Description: "A map of metadata to store with the object in IONOS S3 Object Storage", + Description: "A map of metadata to store with the object in IONOS Object Storage", Optional: true, ElementType: types.StringType, Validators: []validator.Map{mapvalidator.ValueStringsAre([]validator.String{ @@ -189,11 +189,11 @@ func (r *objectResource) Configure(_ context.Context, req resource.ConfigureRequ return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return @@ -214,11 +214,11 @@ func (r *objectResource) ConfigValidators(_ context.Context) []resource.ConfigVa // Create creates the object. func (r *objectResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.ObjectResourceModel + var data *objectstorage.ObjectResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -241,11 +241,11 @@ func (r *objectResource) Create(ctx context.Context, req resource.CreateRequest, // Read reads the object. 
func (r *objectResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.ObjectResourceModel + var data *objectstorage.ObjectResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -281,7 +281,7 @@ func (r *objectResource) ImportState(ctx context.Context, req resource.ImportSta // Update updates the object. func (r *objectResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var plan, state *s3.ObjectResourceModel + var plan, state *objectstorage.ObjectResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { @@ -299,11 +299,11 @@ func (r *objectResource) Update(ctx context.Context, req resource.UpdateRequest, // Delete deletes the object. func (r *objectResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.ObjectResourceModel + var data *objectstorage.ObjectResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_object_copy.go b/internal/framework/services/objectstorage/resource_object_copy.go similarity index 89% rename from internal/framework/services/s3/resource_object_copy.go rename to internal/framework/services/objectstorage/resource_object_copy.go index fde577677..7b686ab13 100644 --- a/internal/framework/services/s3/resource_object_copy.go +++ b/internal/framework/services/objectstorage/resource_object_copy.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/s3" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/services/objectstorage" ) var ( @@ -31,7 +31,7 @@ func NewObjectCopyResource() resource.Resource { } type objectCopyResource struct { - client *s3.Client + client *objectstorage.Client } // Metadata returns the metadata for the object copy resource. 
@@ -119,7 +119,7 @@ func (r *objectCopyResource) Schema(_ context.Context, req resource.SchemaReques Validators: []validator.String{stringvalidator.OneOf("COPY", "REPLACE")}, }, "server_side_encryption": schema.StringAttribute{ - Description: "The server-side encryption algorithm used when storing this object copy in IONOS S3 Object Copy Storage (AES256).", + Description: "The server-side encryption algorithm used when storing this object copy in IONOS Object Storage (AES256).", Optional: true, Validators: []validator.String{stringvalidator.OneOf("AES256")}, }, @@ -131,7 +131,7 @@ func (r *objectCopyResource) Schema(_ context.Context, req resource.SchemaReques Validators: []validator.String{stringvalidator.OneOf("STANDARD")}, }, "website_redirect": schema.StringAttribute{ - Description: "If the bucket is configured as a website, redirects requests for this object copy to another object copy in the same bucket or to an external URL. IONOS S3 Object Copy Storage stores the value of this header in the object copy metadata", + Description: "If the bucket is configured as a website, redirects requests for this object copy to another object copy in the same bucket or to an external URL. IONOS Object Storage stores the value of this header in the object copy metadata", Optional: true, }, "server_side_encryption_customer_algorithm": schema.StringAttribute{ @@ -144,7 +144,7 @@ func (r *objectCopyResource) Schema(_ context.Context, req resource.SchemaReques Optional: true, }, "server_side_encryption_customer_key_md5": schema.StringAttribute{ - Description: "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IONOS S3 Object Copy Storage uses this header for a message integrity check to ensure that the encryption key was transmitted without error", + Description: "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
IONOS Object Storage uses this header for a message integrity check to ensure that the encryption key was transmitted without error", Optional: true, }, "source_customer_algorithm": schema.StringAttribute{ @@ -157,7 +157,7 @@ func (r *objectCopyResource) Schema(_ context.Context, req resource.SchemaReques Optional: true, }, "source_customer_key_md5": schema.StringAttribute{ - Description: "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IONOS S3 Object Copy Storage uses this header for a message integrity check to ensure that the encryption key was transmitted without error", + Description: "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IONOS Object Storage uses this header for a message integrity check to ensure that the encryption key was transmitted without error", Optional: true, }, "object_lock_mode": schema.StringAttribute{ @@ -188,7 +188,7 @@ func (r *objectCopyResource) Schema(_ context.Context, req resource.SchemaReques ElementType: types.StringType, }, "metadata": schema.MapAttribute{ - Description: "A map of metadata to store with the object copy in IONOS S3 Object Copy Storage", + Description: "A map of metadata to store with the object copy in IONOS Object Storage", Optional: true, ElementType: types.StringType, Validators: []validator.Map{mapvalidator.ValueStringsAre([]validator.String{ @@ -215,11 +215,11 @@ func (r *objectCopyResource) Configure(_ context.Context, req resource.Configure return } - client, ok := req.ProviderData.(*s3.Client) + client, ok := req.ProviderData.(*objectstorage.Client) if !ok { resp.Diagnostics.AddError( "Unexpected Data Source Configure Type", - fmt.Sprintf("Expected *s3.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *objectstorage.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return @@ -231,11 +231,11 @@ func (r *objectCopyResource) Configure(_ context.Context, req resource.Configure // Create creates the object copy. func (r *objectCopyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.ObjectCopyResourceModel + var data *objectstorage.ObjectCopyResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -252,11 +252,11 @@ func (r *objectCopyResource) Create(ctx context.Context, req resource.CreateRequ // Read reads the object copy. func (r *objectCopyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.ObjectCopyResourceModel + var data *objectstorage.ObjectCopyResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return @@ -292,7 +292,7 @@ func (r *objectCopyResource) ImportState(ctx context.Context, req resource.Impor // Update updates the object copy. func (r *objectCopyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var plan, state *s3.ObjectCopyResourceModel + var plan, state *objectstorage.ObjectCopyResourceModel // Read Terraform plan data into the model resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
@@ -312,11 +312,11 @@ func (r *objectCopyResource) Update(ctx context.Context, req resource.UpdateRequ // Delete deletes the object copy. func (r *objectCopyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { if r.client == nil { - resp.Diagnostics.AddError("s3 api client not configured", "The provider client is not configured") + resp.Diagnostics.AddError("object storage api client not configured", "The provider client is not configured") return } - var data *s3.ObjectCopyResourceModel + var data *objectstorage.ObjectCopyResourceModel resp.Diagnostics.Append(req.State.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return diff --git a/internal/framework/services/s3/resource_object_copy_test.go b/internal/framework/services/objectstorage/resource_object_copy_test.go similarity index 96% rename from internal/framework/services/s3/resource_object_copy_test.go rename to internal/framework/services/objectstorage/resource_object_copy_test.go index 5a352d932..3ab618a1c 100644 --- a/internal/framework/services/s3/resource_object_copy_test.go +++ b/internal/framework/services/objectstorage/resource_object_copy_test.go @@ -1,7 +1,7 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" @@ -13,7 +13,7 @@ import ( "testing" ) -func TestAccS3ObjectCopy_basic(t *testing.T) { +func TestAccObjectCopy_basic(t *testing.T) { ctx := context.Background() rNameSource := acctest.GenerateRandomResourceName(bucketPrefix) rNameTarget := acctest.GenerateRandomResourceName(bucketPrefix) @@ -63,7 +63,7 @@ func TestAccS3ObjectCopy_basic(t *testing.T) { }) } -func TestAccS3ObjectCopy_metadata(t *testing.T) { +func TestAccObjectCopy_metadata(t *testing.T) { ctx := context.Background() rName1 := acctest.GenerateRandomResourceName(bucketPrefix) rName2 := acctest.GenerateRandomResourceName(bucketPrefix) @@ -89,7 +89,7 @@ func 
TestAccS3ObjectCopy_metadata(t *testing.T) { }) } -func TestAccS3ObjectCopy_sourceWithSlashes(t *testing.T) { +func TestAccObjectCopy_sourceWithSlashes(t *testing.T) { ctx := context.Background() rName1 := acctest.GenerateRandomResourceName(bucketPrefix) rName2 := acctest.GenerateRandomResourceName(bucketPrefix) @@ -120,7 +120,7 @@ func TestAccS3ObjectCopy_sourceWithSlashes(t *testing.T) { }) } -func TestAccS3ObjectCopy_objectLockLegalHold(t *testing.T) { +func TestAccObjectCopy_objectLockLegalHold(t *testing.T) { ctx := context.Background() rName1 := acctest.GenerateRandomResourceName(bucketPrefix) rName2 := acctest.GenerateRandomResourceName(bucketPrefix) @@ -151,7 +151,7 @@ func TestAccS3ObjectCopy_objectLockLegalHold(t *testing.T) { }) } -func TestAccS3ObjectCopy_targetWithMultipleSlashes(t *testing.T) { +func TestAccObjectCopy_targetWithMultipleSlashes(t *testing.T) { ctx := context.Background() rName1 := acctest.GenerateRandomResourceName(bucketPrefix) rName2 := acctest.GenerateRandomResourceName(bucketPrefix) @@ -178,7 +178,7 @@ func TestAccS3ObjectCopy_targetWithMultipleSlashes(t *testing.T) { func testAccCheckObjectCopyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -213,7 +213,7 @@ func testAccCheckObjectCopyExists(ctx context.Context, n string) resource.TestCh return fmt.Errorf("Not Found: %s", n) } - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/resource_object_test.go b/internal/framework/services/objectstorage/resource_object_test.go similarity index 98% rename from internal/framework/services/s3/resource_object_test.go rename to internal/framework/services/objectstorage/resource_object_test.go index c45ac3b3b..0c7a92694 100644 --- a/internal/framework/services/s3/resource_object_test.go 
+++ b/internal/framework/services/objectstorage/resource_object_test.go @@ -1,14 +1,14 @@ -//go:build all || s3 -// +build all s3 +//go:build all || objectstorage +// +build all objectstorage -package s3_test +package objectstorage_test import ( "context" "fmt" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils" "io" "os" @@ -57,7 +57,7 @@ func TestAccObjectResourceBasic(t *testing.T) { ImportStateId: fmt.Sprintf("%s/%s", bucket, key), ImportState: true, ImportStateVerifyIdentifierAttribute: "key", - ImportStateVerifyIgnore: []string{"force_destroy", "content"}, + ImportStateVerifyIgnore: []string{"force_destroy", "content", "server_side_encryption"}, ImportStateIdFunc: func(s *terraform.State) (string, error) { rs, ok := s.RootModule().Resources[objectResourceName] if !ok { @@ -686,7 +686,7 @@ func testAccCheckObjectExists(ctx context.Context, n string, body *string) resou return fmt.Errorf("Not Found: %s", n) } - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } @@ -707,7 +707,7 @@ func testAccCheckObjectExists(ctx context.Context, n string, body *string) resou } } -func buildGetObjectRequest(ctx context.Context, client *s3.APIClient, attributes map[string]string) s3.ApiGetObjectRequest { +func buildGetObjectRequest(ctx context.Context, client *objstorage.APIClient, attributes map[string]string) objstorage.ApiGetObjectRequest { req := client.ObjectsApi.GetObject(ctx, attributes["bucket"], attributes["key"]) if attributes["version_id"] != "" { req = req.VersionId(attributes["version_id"]) @@ -735,7 +735,7 @@ func buildGetObjectRequest(ctx context.Context, client *s3.APIClient, attributes func testAccCheckObjectBody(got *string, want string) resource.TestCheckFunc { return func(s 
*terraform.State) error { if *got != want { - return fmt.Errorf("S3 Object body = %v, want %v", got, want) + return fmt.Errorf("object body = %v, want %v", got, want) } return nil @@ -759,7 +759,7 @@ func testAccObjectCreateTempFile(t *testing.T, data string) string { } func testAccCheckObjectDestroy(s *terraform.State) error { - client, err := acctest.S3Client() + client, err := acctest.ObjectStorageClient() if err != nil { return err } diff --git a/internal/framework/services/s3/errors.go b/internal/framework/services/s3/errors.go deleted file mode 100644 index d636ca9af..000000000 --- a/internal/framework/services/s3/errors.go +++ /dev/null @@ -1,31 +0,0 @@ -package s3 - -import ( - "errors" - "fmt" - - s3 "github.com/ionos-cloud/sdk-go-s3" -) - -func formatXMLError(err error) error { - var apiErr s3.GenericOpenAPIError - if errors.As(err, &apiErr) { - if s3Error, ok := apiErr.Model().(s3.Error); ok { - msg := "" - if s3Error.Code != nil { - msg += fmt.Sprintf("code:%s\n", *s3Error.Code) - } - if s3Error.Message != nil { - msg += fmt.Sprintf("message:%s\n", *s3Error.Message) - } - if s3Error.HostId != nil { - msg += fmt.Sprintf("host:%s\n", *s3Error.HostId) - } - if s3Error.RequestId != nil { - msg += fmt.Sprintf("request:%s\n", *s3Error.RequestId) - } - return errors.New(msg) - } - } - return err -} diff --git a/internal/tags/tags.go b/internal/tags/tags.go index 11d7d3051..9e726df7a 100644 --- a/internal/tags/tags.go +++ b/internal/tags/tags.go @@ -5,14 +5,14 @@ import ( "fmt" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // KeyValueTags is a map of key-value tags. type KeyValueTags map[string]string -// New creates a new KeyValueTags from a list of s3.Tag. -func New(tags []s3.Tag) KeyValueTags { +// New creates a new KeyValueTags from a list of objstorage.Tag. 
+func New(tags []objstorage.Tag) KeyValueTags { result := make(KeyValueTags) for _, tag := range tags { @@ -77,18 +77,18 @@ func (t KeyValueTags) Ignore(ignoreTags KeyValueTags) KeyValueTags { return result } -// ToList converts KeyValueTags to a list of s3.Tag. -func (t KeyValueTags) ToList() []s3.Tag { - tags := make([]s3.Tag, 0, len(t)) +// ToList converts KeyValueTags to a list of objstorage.Tag. +func (t KeyValueTags) ToList() []objstorage.Tag { + tags := make([]objstorage.Tag, 0, len(t)) for key, value := range t { - tags = append(tags, s3.Tag{Key: s3.PtrString(key), Value: s3.PtrString(value)}) + tags = append(tags, objstorage.Tag{Key: objstorage.PtrString(key), Value: objstorage.PtrString(value)}) } return tags } -// ToListPointer converts KeyValueTags to a pointer to a list of s3.Tag. -func (t KeyValueTags) ToListPointer() *[]s3.Tag { +// ToListPointer converts KeyValueTags to a pointer to a list of objstorage.Tag. +func (t KeyValueTags) ToListPointer() *[]objstorage.Tag { tags := t.ToList() return &tags } diff --git a/ionoscloud/data_source_dbaas_pgsql_backups.go b/ionoscloud/data_source_dbaas_pgsql_backups.go index 2859f5b6b..873f54447 100644 --- a/ionoscloud/data_source_dbaas_pgsql_backups.go +++ b/ionoscloud/data_source_dbaas_pgsql_backups.go @@ -47,7 +47,7 @@ func dataSourceDbaasPgSqlBackups() *schema.Resource { }, "location": { Type: schema.TypeString, - Description: "The S3 location where the backups will be stored.", + Description: "The Object Storage location where the backups will be stored.", Computed: true, }, "version": { diff --git a/ionoscloud/data_source_dbaas_pgsql_cluster.go b/ionoscloud/data_source_dbaas_pgsql_cluster.go index e7fe24586..8176ca87d 100644 --- a/ionoscloud/data_source_dbaas_pgsql_cluster.go +++ b/ionoscloud/data_source_dbaas_pgsql_cluster.go @@ -109,7 +109,7 @@ func dataSourceDbaasPgSqlCluster() *schema.Resource { }, "backup_location": { Type: schema.TypeString, - Description: "The S3 location where the backups will be 
stored.", + Description: "The Object Storage location where the backups will be stored.", Computed: true, }, "maintenance_window": { diff --git a/ionoscloud/data_source_k8s_cluster.go b/ionoscloud/data_source_k8s_cluster.go index 8847eb981..5902284e0 100644 --- a/ionoscloud/data_source_k8s_cluster.go +++ b/ionoscloud/data_source_k8s_cluster.go @@ -242,13 +242,13 @@ func dataSourceK8sClusterSchema() map[string]*schema.Schema { }, "s3_buckets": { Type: schema.TypeList, - Description: "List of S3 bucket configured for K8s usage. For now it contains only an S3 bucket used to store K8s API audit logs.", + Description: "List of Object Storage bucket configured for K8s usage. For now it contains only an Object Storage bucket used to store K8s API audit logs.", Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, - Description: "Name of the S3 bucket", + Description: "Name of the Object Storage bucket", Required: true, }, }, diff --git a/ionoscloud/data_source_s3_key.go b/ionoscloud/data_source_s3_key.go index c8d8bcc12..50cef9584 100644 --- a/ionoscloud/data_source_s3_key.go +++ b/ionoscloud/data_source_s3_key.go @@ -9,13 +9,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -func dataSourceS3Key() *schema.Resource { +func dataSourceObjectStorageKey() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceS3KeyRead, + ReadContext: dataSourceObjectStorageKeyRead, Schema: map[string]*schema.Schema{ "id": { Type: schema.TypeString, - Description: "Id of the s3 key.", + Description: "Id of the key.", Optional: true, }, "user_id": { @@ -26,7 +26,7 @@ func dataSourceS3Key() *schema.Resource { }, "secret_key": { Type: schema.TypeString, - Description: "The S3 Secret key.", + Description: "The Secret key.", Computed: true, }, "active": { @@ -40,12 +40,12 @@ func dataSourceS3Key() *schema.Resource { } } -func dataSourceS3KeyRead(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { +func dataSourceObjectStorageKeyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { id, idOk := d.GetOk("id") if !idOk { - return diag.FromErr(fmt.Errorf("please provide the s3 key id")) + return diag.FromErr(fmt.Errorf("please provide the object storage key id")) } d.SetId(id.(string)) diff --git a/ionoscloud/import_s3_keys_test.go b/ionoscloud/import_s3_keys_test.go index 3d9f3c0a9..64eac4b1c 100644 --- a/ionoscloud/import_s3_keys_test.go +++ b/ionoscloud/import_s3_keys_test.go @@ -12,7 +12,7 @@ import ( "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/constant" ) -func TestAccS3KeyImportBasic(t *testing.T) { +func TestAccKeyImportBasic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, ProtoV6ProviderFactories: testAccProtoV6ProviderFactoriesInternal(t, &testAccProvider), @@ -25,14 +25,14 @@ func TestAccS3KeyImportBasic(t *testing.T) { ResourceName: constant.S3KeyResource + "." 
+ constant.S3KeyTestResource, ImportState: true, ImportStateVerify: true, - ImportStateIdFunc: testAccS3KeyImportStateID, + ImportStateIdFunc: testAccKeyImportStateID, ImportStateVerifyIgnore: []string{}, }, }, }) } -func testAccS3KeyImportStateID(s *terraform.State) (string, error) { +func testAccKeyImportStateID(s *terraform.State) (string, error) { var importID = "" for _, rs := range s.RootModule().Resources { diff --git a/ionoscloud/provider.go b/ionoscloud/provider.go index 6cb862887..c74c23276 100644 --- a/ionoscloud/provider.go +++ b/ionoscloud/provider.go @@ -8,7 +8,7 @@ import ( "os" "runtime" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -91,20 +91,20 @@ func Provider() *schema.Provider { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("IONOS_S3_ACCESS_KEY", nil), - Description: "Access key for IONOS S3 operations.", + Description: "Access key for IONOS Object Storage operations.", }, "s3_secret_key": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("IONOS_S3_SECRET_KEY", nil), - Description: "Secret key for IONOS S3 operations.", + Description: "Secret key for IONOS Object Storage operations.", }, "s3_region": { Type: schema.TypeString, Optional: true, Default: "eu-central-3", DefaultFunc: schema.EnvDefaultFunc("IONOS_S3_REGION", nil), - Description: "Region for IONOS S3 operations.", + Description: "Region for IONOS Object Storage operations.", }, }, ResourcesMap: map[string]*schema.Resource{ @@ -193,7 +193,7 @@ func Provider() *schema.Provider { constant.TemplateResource: dataSourceTemplate(), constant.BackupUnitResource: dataSourceBackupUnit(), constant.FirewallResource: dataSourceFirewall(), - constant.S3KeyResource: dataSourceS3Key(), + constant.S3KeyResource: dataSourceObjectStorageKey(), constant.GroupResource: dataSourceGroup(), 
constant.UserResource: dataSourceUser(), constant.IpBlockResource: dataSourceIpBlock(), @@ -384,7 +384,7 @@ func NewClientByType(clientOpts ClientOptions, clientType clientType) interface{ case psqlClient: return dbaasService.NewPsqlClient(clientOpts.Username, clientOpts.Password, clientOpts.Token, clientOpts.Url, clientOpts.Version, clientOpts.Username) case s3Client: - return s3.NewAPIClient(s3.NewConfiguration()) + return objstorage.NewAPIClient(objstorage.NewConfiguration()) case kafkaClient: return kafkaService.NewClient(clientOpts.Username, clientOpts.Password, clientOpts.Token, clientOpts.Url, clientOpts.Version, clientOpts.Username) case apiGatewayClient: diff --git a/ionoscloud/resource_dbaas_pgsql_cluster.go b/ionoscloud/resource_dbaas_pgsql_cluster.go index 9fc187add..743681a69 100644 --- a/ionoscloud/resource_dbaas_pgsql_cluster.go +++ b/ionoscloud/resource_dbaas_pgsql_cluster.go @@ -119,7 +119,7 @@ func resourceDbaasPgSqlCluster() *schema.Resource { }, "backup_location": { Type: schema.TypeString, - Description: "The S3 location where the backups will be stored.", + Description: "The Object Storage location where the backups will be stored.", Optional: true, Computed: true, ValidateDiagFunc: validation.ToDiagFunc(validation.StringInSlice([]string{"de", "eu-south-2", "eu-central-2"}, true)), diff --git a/ionoscloud/resource_k8s_cluster.go b/ionoscloud/resource_k8s_cluster.go index 1ce6829c6..43e790159 100644 --- a/ionoscloud/resource_k8s_cluster.go +++ b/ionoscloud/resource_k8s_cluster.go @@ -118,13 +118,13 @@ func resourcek8sCluster() *schema.Resource { }, "s3_buckets": { Type: schema.TypeList, - Description: "List of S3 bucket configured for K8s usage. For now it contains only an S3 bucket used to store K8s API audit logs.", + Description: "List of Object Storage bucket configured for K8s usage. 
For now it contains only an Object Storage bucket used to store K8s API audit logs.", Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, - Description: "Name of the S3 bucket", + Description: "Name of the Object Storage bucket", Optional: true, }, }, @@ -236,7 +236,7 @@ func resourcek8sClusterCreate(ctx context.Context, d *schema.ResourceData, meta s3Bucket.Name = &name addBucket = true } else { - diags := diag.FromErr(fmt.Errorf("name must be provided for s3 bucket")) + diags := diag.FromErr(fmt.Errorf("name must be provided for Object Storage bucket")) return diags } if addBucket { diff --git a/ionoscloud/resource_s3_key.go b/ionoscloud/resource_s3_key.go index 077f444cf..3e5455646 100644 --- a/ionoscloud/resource_s3_key.go +++ b/ionoscloud/resource_s3_key.go @@ -35,7 +35,7 @@ func resourceS3Key() *schema.Resource { }, "secret_key": { Type: schema.TypeString, - Description: "The S3 Secret key.", + Description: "The Object Storage Secret key.", Computed: true, }, "active": { @@ -58,12 +58,12 @@ func resourceS3KeyCreate(ctx context.Context, d *schema.ResourceData, meta inter if err != nil { d.SetId("") - diags := diag.FromErr(fmt.Errorf("error creating S3 key: %w", err)) + diags := diag.FromErr(fmt.Errorf("error creating Object Storage key: %w", err)) return diags } if rsp.Id == nil { - return diag.FromErr(fmt.Errorf("the API didn't return an s3 key ID")) + return diag.FromErr(fmt.Errorf("the API didn't return an Object Storage key ID")) } keyId := *rsp.Id d.SetId(keyId) @@ -71,7 +71,7 @@ func resourceS3KeyCreate(ctx context.Context, d *schema.ResourceData, meta inter return diag.FromErr(errState) } - log.Printf("[INFO] Created S3 key: %s", d.Id()) + log.Printf("[INFO] Created Object Storage key: %s", d.Id()) active := d.Get("active").(bool) s3Key := ionoscloud.S3Key{ @@ -106,14 +106,14 @@ func resourceS3KeyRead(ctx context.Context, d *schema.ResourceData, meta interfa d.SetId("") return nil } - diags := 
diag.FromErr(fmt.Errorf("error while reading S3 key %s: %w, %+v", d.Id(), err, s3Key)) + diags := diag.FromErr(fmt.Errorf("error while reading Object Storage key %s: %w, %+v", d.Id(), err, s3Key)) return diags } - log.Printf("[INFO] Successfully retrieved S3 key %+v \n", *s3Key.Id) + log.Printf("[INFO] Successfully retrieved Object Storage key %+v \n", *s3Key.Id) if s3Key.HasProperties() && s3Key.Properties.HasActive() { - log.Printf("[INFO] Successfully retrieved S3 key with status: %t", *s3Key.Properties.Active) + log.Printf("[INFO] Successfully retrieved Object Storage key with status: %t", *s3Key.Properties.Active) } if err := setS3KeyIdAndProperties(&s3Key, d); err != nil { @@ -129,10 +129,10 @@ func resourceS3KeyUpdate(ctx context.Context, d *schema.ResourceData, meta inter request := ionoscloud.S3Key{} request.Properties = &ionoscloud.S3KeyProperties{} - log.Printf("[INFO] Attempting to update S3 key %s", d.Id()) + log.Printf("[INFO] Attempting to update Object Storage key %s", d.Id()) newActiveSetting := d.Get("active") - log.Printf("[INFO] S3 key active setting changed to %+v", newActiveSetting) + log.Printf("[INFO] Object Storage key active setting changed to %+v", newActiveSetting) active := newActiveSetting.(bool) request.Properties.Active = &active @@ -145,7 +145,7 @@ func resourceS3KeyUpdate(ctx context.Context, d *schema.ResourceData, meta inter d.SetId("") return nil } - diags := diag.FromErr(fmt.Errorf("error while updating S3 key %s: %w", d.Id(), err)) + diags := diag.FromErr(fmt.Errorf("error while updating Object Storage key %s: %w", d.Id(), err)) return diags } @@ -168,7 +168,7 @@ func resourceS3KeyDelete(ctx context.Context, d *schema.ResourceData, meta inter d.SetId("") return nil } - diags := diag.FromErr(fmt.Errorf("error while deleting S3 key %s: %w", d.Id(), err)) + diags := diag.FromErr(fmt.Errorf("error while deleting Object Storage key %s: %w", d.Id(), err)) return diags } @@ -178,12 +178,12 @@ func resourceS3KeyDelete(ctx 
context.Context, d *schema.ResourceData, meta inter s3KeyDeleted, dsErr := s3KeyDeleted(ctx, client, d) if dsErr != nil { - diags := diag.FromErr(fmt.Errorf("error while checking deletion status of S3 key %s: %w", d.Id(), dsErr)) + diags := diag.FromErr(fmt.Errorf("error while checking deletion status of Object Storage key %s: %w", d.Id(), dsErr)) return diags } if s3KeyDeleted { - log.Printf("[INFO] Successfully deleted S3 key: %s", d.Id()) + log.Printf("[INFO] Successfully deleted Object Storage key: %s", d.Id()) break } @@ -192,7 +192,7 @@ func resourceS3KeyDelete(ctx context.Context, d *schema.ResourceData, meta inter log.Printf("[INFO] trying again ...") case <-ctx.Done(): log.Printf("[INFO] delete timed out") - diags := diag.FromErr(fmt.Errorf("s3 key delete timed out! WARNING: your s3 key will still probably be deleted after some time but the terraform state won't reflect that; check your Ionos Cloud account for updates")) + diags := diag.FromErr(fmt.Errorf("Object Storage key delete timed out! 
WARNING: your Object Storage key will still probably be deleted after some time but the terraform state won't reflect that; check your Ionos Cloud account for updates")) return diags } } @@ -209,7 +209,7 @@ func s3KeyDeleted(ctx context.Context, client *ionoscloud.APIClient, d *schema.R if httpNotFound(apiResponse) { return true, nil } - return true, fmt.Errorf("error checking S3 key deletion status: %w", err) + return true, fmt.Errorf("error checking Object Storage key deletion status: %w", err) } return false, nil } @@ -220,7 +220,7 @@ func s3Ready(ctx context.Context, client *ionoscloud.APIClient, d *schema.Resour logApiRequestTime(apiResponse) if err != nil { - return true, fmt.Errorf("error checking S3 Key status: %w", err) + return true, fmt.Errorf("error checking Object Storage Key status: %w", err) } active := d.Get("active").(bool) return *rsp.Properties.Active == active, nil @@ -244,9 +244,9 @@ func resourceS3KeyImport(ctx context.Context, d *schema.ResourceData, meta inter if err != nil { if httpNotFound(apiResponse) { d.SetId("") - return nil, fmt.Errorf("unable to find S3 key %q", keyId) + return nil, fmt.Errorf("unable to find Object Storage key %q", keyId) } - return nil, fmt.Errorf("unable to retrieve S3 key %q, error:%w", keyId, err) + return nil, fmt.Errorf("unable to retrieve Object Storage key %q, error:%w", keyId, err) } if err := setS3KeyIdAndProperties(&s3Key, d); err != nil { diff --git a/ionoscloud/resource_s3_key_test.go b/ionoscloud/resource_s3_key_test.go index cd7d2d8fe..d8d110c2b 100644 --- a/ionoscloud/resource_s3_key_test.go +++ b/ionoscloud/resource_s3_key_test.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" ) -func TestAccS3KeyBasic(t *testing.T) { +func TestAccKeyBasic(t *testing.T) { var s3Key ionoscloud.S3Key resource.Test(t, resource.TestCase{ @@ -25,12 +25,12 @@ func TestAccS3KeyBasic(t *testing.T) { testAccPreCheck(t) }, ProtoV6ProviderFactories: 
testAccProtoV6ProviderFactoriesInternal(t, &testAccProvider), - CheckDestroy: testAccChecks3KeyDestroyCheck, + CheckDestroy: testAccChecksKeyDestroyCheck, Steps: []resource.TestStep{ { Config: testAccChecks3KeyConfigBasic, Check: resource.ComposeTestCheckFunc( - testAccChecks3KeyExists(constant.S3KeyResource+"."+constant.S3KeyTestResource, &s3Key), + testAccCheckKeyExists(constant.S3KeyResource+"."+constant.S3KeyTestResource, &s3Key), resource.TestCheckResourceAttrSet(constant.S3KeyResource+"."+constant.S3KeyTestResource, "secret_key"), resource.TestCheckResourceAttr(constant.S3KeyResource+"."+constant.S3KeyTestResource, "active", "true"), ), @@ -56,7 +56,7 @@ func TestAccS3KeyBasic(t *testing.T) { }) } -func testAccChecks3KeyDestroyCheck(s *terraform.State) error { +func testAccChecksKeyDestroyCheck(s *terraform.State) error { client := testAccProvider.Meta().(services.SdkBundle).CloudApiClient @@ -71,17 +71,17 @@ func testAccChecks3KeyDestroyCheck(s *terraform.State) error { if err != nil { if !httpNotFound(apiResponse) { - return fmt.Errorf("an error occurred while fetching s3 key %s: %w", rs.Primary.ID, err) + return fmt.Errorf("an error occurred while fetching Object Storage key %s: %w", rs.Primary.ID, err) } } else { - return fmt.Errorf("s3 Key still exists %s", rs.Primary.ID) + return fmt.Errorf("Object Storage Key still exists %s", rs.Primary.ID) } } return nil } -func testAccChecks3KeyExists(n string, s3Key *ionoscloud.S3Key) resource.TestCheckFunc { +func testAccCheckKeyExists(n string, s3Key *ionoscloud.S3Key) resource.TestCheckFunc { return func(s *terraform.State) error { client := testAccProvider.Meta().(services.SdkBundle).CloudApiClient @@ -101,7 +101,7 @@ func testAccChecks3KeyExists(n string, s3Key *ionoscloud.S3Key) resource.TestChe logApiRequestTime(apiResponse) if err != nil { - return fmt.Errorf("error occurred while fetching S3 Key: %s", rs.Primary.ID) + return fmt.Errorf("error occurred while fetching Object Storage Key: %s", rs.Primary.ID) 
} if *foundS3Key.Id != rs.Primary.ID { @@ -129,7 +129,7 @@ resource ` + constant.S3KeyResource + ` ` + constant.S3KeyTestResource + ` { active = true }` -// this step is commented since the current behaviour of s3 keys is that when you create an s3 key with active set on false +// this step is commented since the current behaviour of Object Storage keys is that when you create an Object Storage key with active set on false // it is set to true by the API, so an update from false to true can not be done // var testAccChecks3KeyConfigUpdate = ` diff --git a/services/cloudapi/flowlog/flowlog.go b/services/cloudapi/flowlog/flowlog.go index 6bf7903e7..45c1ec290 100644 --- a/services/cloudapi/flowlog/flowlog.go +++ b/services/cloudapi/flowlog/flowlog.go @@ -27,7 +27,7 @@ var FlowlogSchemaResource = &schema.Resource{ }, "bucket": { Type: schema.TypeString, - Description: "The S3 bucket name of an existing IONOS Cloud S3 bucket. Immutable, forces re-recreation of the nic resource.", + Description: "The bucket name of an existing IONOS Object Storage bucket. 
Immutable, forces re-recreation of the nic resource.", Required: true, }, "direction": { @@ -60,7 +60,7 @@ var FlowlogSchemaDatasource = &schema.Resource{ }, "bucket": { Type: schema.TypeString, - Description: "The S3 bucket name of an existing IONOS Cloud S3 bucket.", + Description: "The bucket name of an existing IONOS Object Storage bucket.", Computed: true, }, "direction": { diff --git a/services/s3/bucket.go b/services/objectstorage/bucket.go similarity index 97% rename from services/s3/bucket.go rename to services/objectstorage/bucket.go index beac3c7a2..6ce25962b 100644 --- a/services/s3/bucket.go +++ b/services/objectstorage/bucket.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -8,7 +8,7 @@ import ( "github.com/cenkalti/backoff/v4" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" tftags "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/tags" ) @@ -32,7 +32,7 @@ type BucketDataSourceModel struct { // CreateBucket creates a new bucket. 
func (c *Client) CreateBucket(ctx context.Context, name, location types.String, objectLock types.Bool, tags types.Map, timeout time.Duration) error { - createBucketConfig := s3.CreateBucketConfiguration{ + createBucketConfig := objstorage.CreateBucketConfiguration{ LocationConstraint: location.ValueStringPointer(), } diff --git a/services/s3/bucket_delete.go b/services/objectstorage/bucket_delete.go similarity index 64% rename from services/s3/bucket_delete.go rename to services/objectstorage/bucket_delete.go index c4bb11714..f9b55de5b 100644 --- a/services/s3/bucket_delete.go +++ b/services/objectstorage/bucket_delete.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -7,17 +7,17 @@ import ( "github.com/ionos-cloud/sdk-go-bundle/shared" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) const errAccessDenied = "AccessDenied" // EmptyBucket deletes all objects and delete markers in the bucket. -// If `force` is `true` then S3 Object Lock governance mode restrictions are bypassed and -// an attempt is made to remove any S3 Object Lock legal holds. +// If `force` is `true` then Object Lock governance mode restrictions are bypassed and +// an attempt is made to remove any Object Lock legal holds. // Returns the number of object versions and delete markers deleted. 
func (c *Client) EmptyBucket(ctx context.Context, bucket string, forceDestroy bool) (int64, error) { - objCount, err := c.forEachObjectVersionsPage(ctx, bucket, func(ctx context.Context, conn *s3.APIClient, bucket string, page *s3.ListObjectVersionsOutput) (int64, error) { + objCount, err := c.forEachObjectVersionsPage(ctx, bucket, func(ctx context.Context, conn *objstorage.APIClient, bucket string, page *objstorage.ListObjectVersionsOutput) (int64, error) { return deletePageOfObjectVersions(ctx, conn, bucket, forceDestroy, page) }) @@ -31,7 +31,7 @@ func (c *Client) EmptyBucket(ctx context.Context, bucket string, forceDestroy bo return objCount, err } -func (c *Client) forEachObjectVersionsPage(ctx context.Context, bucket string, fn func(ctx context.Context, conn *s3.APIClient, bucket string, page *s3.ListObjectVersionsOutput) (int64, error)) (int64, error) { +func (c *Client) forEachObjectVersionsPage(ctx context.Context, bucket string, fn func(ctx context.Context, conn *objstorage.APIClient, bucket string, page *objstorage.ListObjectVersionsOutput) (int64, error)) (int64, error) { var objCount int64 input := &ListObjectVersionsInput{ @@ -44,7 +44,7 @@ func (c *Client) forEachObjectVersionsPage(ctx context.Context, bucket string, f page, err := pages.NextPage(ctx) if err != nil { - return objCount, fmt.Errorf("listing S3 bucket (%s) object versions: %w", bucket, err) + return objCount, fmt.Errorf("listing bucket (%s) object versions: %w", bucket, err) } n, err := fn(ctx, c.client, bucket, page) @@ -63,14 +63,14 @@ func (c *Client) forEachObjectVersionsPage(ctx context.Context, bucket string, f return objCount, nil } -func getObjectsToDelete(page *s3.ListObjectVersionsOutput) []s3.ObjectIdentifier { +func getObjectsToDelete(page *objstorage.ListObjectVersionsOutput) []objstorage.ObjectIdentifier { if page.Versions == nil { return nil } - toDelete := make([]s3.ObjectIdentifier, 0, len(*page.Versions)) + toDelete := make([]objstorage.ObjectIdentifier, 0, 
len(*page.Versions)) for _, v := range *page.Versions { - toDelete = append(toDelete, s3.ObjectIdentifier{ + toDelete = append(toDelete, objstorage.ObjectIdentifier{ Key: v.Key, VersionId: v.VersionId, }) @@ -79,14 +79,14 @@ func getObjectsToDelete(page *s3.ListObjectVersionsOutput) []s3.ObjectIdentifier return toDelete } -func getDeleteMarkersToDelete(page *s3.ListObjectVersionsOutput) []s3.ObjectIdentifier { +func getDeleteMarkersToDelete(page *objstorage.ListObjectVersionsOutput) []objstorage.ObjectIdentifier { if page.DeleteMarkers == nil { return nil } - toDelete := make([]s3.ObjectIdentifier, 0, len(*page.DeleteMarkers)) + toDelete := make([]objstorage.ObjectIdentifier, 0, len(*page.DeleteMarkers)) for _, v := range *page.DeleteMarkers { - toDelete = append(toDelete, s3.ObjectIdentifier{ + toDelete = append(toDelete, objstorage.ObjectIdentifier{ Key: v.Key, VersionId: v.VersionId, }) @@ -95,14 +95,14 @@ func getDeleteMarkersToDelete(page *s3.ListObjectVersionsOutput) []s3.ObjectIden return toDelete } -func deletePageOfObjectVersions(ctx context.Context, conn *s3.APIClient, bucket string, force bool, page *s3.ListObjectVersionsOutput) (int64, error) { +func deletePageOfObjectVersions(ctx context.Context, conn *objstorage.APIClient, bucket string, force bool, page *objstorage.ListObjectVersionsOutput) (int64, error) { toDelete := getObjectsToDelete(page) var objCount int64 if objCount = int64(len(toDelete)); objCount == 0 { return objCount, nil } - req := conn.ObjectsApi.DeleteObjects(ctx, bucket).DeleteObjectsRequest(s3.DeleteObjectsRequest{ + req := conn.ObjectsApi.DeleteObjects(ctx, bucket).DeleteObjectsRequest(objstorage.DeleteObjectsRequest{ Objects: &toDelete, Quiet: shared.ToPtr(true), }) @@ -116,7 +116,7 @@ func deletePageOfObjectVersions(ctx context.Context, conn *s3.APIClient, bucket } if err != nil { - return objCount, fmt.Errorf("deleting S3 bucket (%s) object versions: %w", bucket, err) + return objCount, fmt.Errorf("deleting bucket (%s) object 
versions: %w", bucket, err) } if output.Errors == nil { @@ -148,20 +148,20 @@ func deletePageOfObjectVersions(ctx context.Context, conn *s3.APIClient, bucket } } if err := errors.Join(errs...); err != nil { - return objCount, fmt.Errorf("deleting S3 bucket (%s) object versions: %w", bucket, err) + return objCount, fmt.Errorf("deleting bucket (%s) object versions: %w", bucket, err) } return objCount, nil } -func deletePageOfDeleteMarkers(ctx context.Context, conn *s3.APIClient, bucket string, page *s3.ListObjectVersionsOutput) (int64, error) { +func deletePageOfDeleteMarkers(ctx context.Context, conn *objstorage.APIClient, bucket string, page *objstorage.ListObjectVersionsOutput) (int64, error) { toDelete := getDeleteMarkersToDelete(page) var objCount int64 if objCount = int64(len(toDelete)); objCount == 0 { return objCount, nil } - output, apiResponse, err := conn.ObjectsApi.DeleteObjects(ctx, bucket).DeleteObjectsRequest(s3.DeleteObjectsRequest{ + output, apiResponse, err := conn.ObjectsApi.DeleteObjects(ctx, bucket).DeleteObjectsRequest(objstorage.DeleteObjectsRequest{ Objects: &toDelete, Quiet: shared.ToPtr(true), }).Execute() @@ -170,7 +170,7 @@ func deletePageOfDeleteMarkers(ctx context.Context, conn *s3.APIClient, bucket s } if err != nil { - return objCount, fmt.Errorf("deleting S3 bucket (%s) object versions: %w", bucket, err) + return objCount, fmt.Errorf("deleting bucket (%s) object versions: %w", bucket, err) } if output.Errors == nil { @@ -184,16 +184,15 @@ func deletePageOfDeleteMarkers(ctx context.Context, conn *s3.APIClient, bucket s } if err := errors.Join(errs...); err != nil { - return objCount, fmt.Errorf("deleting S3 bucket (%s) delete markers: %w", bucket, err) + return objCount, fmt.Errorf("deleting bucket (%s) delete markers: %w", bucket, err) } return objCount, nil } -func newDeleteObjectVersionError(err s3.DeletionError) error { - s3Err := fmt.Errorf("%s: %s", *err.Code, *err.Message) - - return fmt.Errorf("deleting: %w", 
newObjectVersionError(*err.Key, *err.VersionId, s3Err)) +func newDeleteObjectVersionError(err objstorage.DeletionError) error { + sErr := fmt.Errorf("%s: %s", *err.Code, *err.Message) + return fmt.Errorf("deleting: %w", newObjectVersionError(*err.Key, *err.VersionId, sErr)) } func newObjectVersionError(key, versionID string, err error) error { @@ -202,8 +201,8 @@ func newObjectVersionError(key, versionID string, err error) error { } if versionID == "" { - return fmt.Errorf("S3 object (%s): %w", key, err) + return fmt.Errorf("object (%s): %w", key, err) } - return fmt.Errorf("S3 object (%s) version (%s): %w", key, versionID, err) + return fmt.Errorf("object (%s) version (%s): %w", key, versionID, err) } diff --git a/services/s3/client.go b/services/objectstorage/client.go similarity index 58% rename from services/s3/client.go rename to services/objectstorage/client.go index d4857d898..b7ac6f6f6 100644 --- a/services/s3/client.go +++ b/services/objectstorage/client.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "bytes" @@ -10,22 +10,22 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" awsv4 "github.com/aws/aws-sdk-go/aws/signer/v4" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) -// Client is a wrapper around the S3 client. +// Client is a wrapper around the Object Storage client. type Client struct { - client *s3.APIClient + client *objstorage.APIClient } // GetBaseClient returns the base client. -func (c *Client) GetBaseClient() *s3.APIClient { +func (c *Client) GetBaseClient() *objstorage.APIClient { return c.client } -// NewClient creates a new S3 client with the given credentials and region. +// NewClient creates a new Object Storage client with the given credentials and region. 
func NewClient(id, secret, region string) *Client { - cfg := s3.NewConfiguration() + cfg := objstorage.NewConfiguration() signer := awsv4.NewSigner(credentials.NewStaticCredentials(id, secret, "")) cfg.MiddlewareWithError = func(r *http.Request) error { var reader io.ReadSeeker @@ -42,11 +42,11 @@ func NewClient(id, secret, region string) *Client { } _, err := signer.Sign(r, reader, "s3", region, time.Now()) if errors.Is(err, credentials.ErrStaticCredentialsEmpty) { - return errors.New("s3 credentials are missing. Please set s3_access_key and s3_secret_key provider attributes or environment variables IONOS_S3_ACCESS_KEY and IONOS_S3_SECRET_KEY") + return errors.New("Object Storage credentials are missing. Please set s3_access_key and s3_secret_key provider attributes or environment variables IONOS_S3_ACCESS_KEY and IONOS_S3_SECRET_KEY") } return err } return &Client{ - client: s3.NewAPIClient(cfg), + client: objstorage.NewAPIClient(cfg), } } diff --git a/services/s3/cors.go b/services/objectstorage/cors.go similarity index 85% rename from services/s3/cors.go rename to services/objectstorage/cors.go index 80aa60e69..d9d4ff9c8 100644 --- a/services/s3/cors.go +++ b/services/objectstorage/cors.go @@ -1,13 +1,13 @@ -package s3 +package objectstorage import ( "context" "fmt" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" - convptr "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/convptr" + "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/convptr" ) // BucketCorsConfigurationModel is used to create, update and delete a bucket cors configuration. 
@@ -75,12 +75,12 @@ func (c *Client) DeleteBucketCors(ctx context.Context, bucketName types.String) return err } -func buildBucketCorsConfigurationModelFromAPIResponse(output *s3.GetBucketCorsOutput, data *BucketCorsConfigurationModel) *BucketCorsConfigurationModel { +func buildBucketCorsConfigurationModelFromAPIResponse(output *objstorage.GetBucketCorsOutput, data *BucketCorsConfigurationModel) *BucketCorsConfigurationModel { data.Cors = buildCorsRulesFromAPIResponse(output.CORSRules) return data } -func buildCorsRulesFromAPIResponse(rules *[]s3.CORSRule) []corsRule { +func buildCorsRulesFromAPIResponse(rules *[]objstorage.CORSRule) []corsRule { if rules == nil { return nil } @@ -99,16 +99,16 @@ func buildCorsRulesFromAPIResponse(rules *[]s3.CORSRule) []corsRule { return result } -func buildBucketCorsConfigurationFromModel(data *BucketCorsConfigurationModel) s3.PutBucketCorsRequest { - return s3.PutBucketCorsRequest{ +func buildBucketCorsConfigurationFromModel(data *BucketCorsConfigurationModel) objstorage.PutBucketCorsRequest { + return objstorage.PutBucketCorsRequest{ CORSRules: buildCorsRulesFromModel(data.Cors), } } -func buildCorsRulesFromModel(rules []corsRule) *[]s3.CORSRule { - result := make([]s3.CORSRule, 0, len(rules)) +func buildCorsRulesFromModel(rules []corsRule) *[]objstorage.CORSRule { + result := make([]objstorage.CORSRule, 0, len(rules)) for _, r := range rules { - result = append(result, s3.CORSRule{ + result = append(result, objstorage.CORSRule{ ID: convptr.Int64ToInt32(r.ID.ValueInt64Pointer()), AllowedHeaders: toStrings(r.AllowedHeaders), AllowedMethods: toStrings(r.AllowedMethods), diff --git a/services/objectstorage/errors.go b/services/objectstorage/errors.go new file mode 100644 index 000000000..1999d38a9 --- /dev/null +++ b/services/objectstorage/errors.go @@ -0,0 +1,44 @@ +package objectstorage + +import ( + "encoding/xml" + "errors" + "log" + + objstorage "github.com/ionos-cloud/sdk-go-s3" +) + +func isBucketNotEmptyError(err error) 
bool { + var apiErr objstorage.GenericOpenAPIError + if errors.As(err, &apiErr) { + body := apiErr.Body() + var objStoreErr objstorage.Error + if err := xml.Unmarshal(body, &objStoreErr); err != nil { + log.Printf("failed to unmarshal error response: %v", err) + return false + } + + if objStoreErr.Code != nil && *objStoreErr.Code == "BucketNotEmpty" { + return true + } + } + return false +} + +func isInvalidStateBucketWithObjectLock(err error) bool { + var apiErr objstorage.GenericOpenAPIError + if errors.As(err, &apiErr) { + body := apiErr.Body() + var objStoreErr objstorage.Error + if err := xml.Unmarshal(body, &objStoreErr); err != nil { + log.Printf("failed to unmarshal error response: %v", err) + return false + } + + if objStoreErr.Code != nil && *objStoreErr.Code == "InvalidBucketState" && + objStoreErr.Message != nil && *objStoreErr.Message == "bucket versioning cannot be disabled on buckets with object lock enabled" { + return true + } + } + return false +} diff --git a/services/s3/lifecycle.go b/services/objectstorage/lifecycle.go similarity index 82% rename from services/s3/lifecycle.go rename to services/objectstorage/lifecycle.go index 13a29944d..63109afbc 100644 --- a/services/s3/lifecycle.go +++ b/services/objectstorage/lifecycle.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -7,7 +7,7 @@ import ( "fmt" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" convptr "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/convptr" hash2 "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/hash" @@ -102,12 +102,12 @@ func (c *Client) DeleteBucketLifecycle(ctx context.Context, bucketName types.Str return err } -func buildBucketLifecycleConfigurationModelFromAPIResponse(output *s3.GetBucketLifecycleOutput, data *BucketLifecycleConfigurationModel) *BucketLifecycleConfigurationModel { +func 
buildBucketLifecycleConfigurationModelFromAPIResponse(output *objstorage.GetBucketLifecycleOutput, data *BucketLifecycleConfigurationModel) *BucketLifecycleConfigurationModel { data.Rule = buildRulesFromAPIResponse(output.Rules) return data } -func buildRulesFromAPIResponse(rules *[]s3.Rule) []lifecycleRule { +func buildRulesFromAPIResponse(rules *[]objstorage.Rule) []lifecycleRule { if rules == nil { return nil } @@ -127,7 +127,7 @@ func buildRulesFromAPIResponse(rules *[]s3.Rule) []lifecycleRule { return result } -func buildExpirationFromAPIResponse(exp *s3.LifecycleExpiration) *expiration { +func buildExpirationFromAPIResponse(exp *objstorage.LifecycleExpiration) *expiration { if exp == nil { return nil } @@ -139,7 +139,7 @@ func buildExpirationFromAPIResponse(exp *s3.LifecycleExpiration) *expiration { } } -func buildNoncurrentVersionExpirationFromAPIResponse(exp *s3.NoncurrentVersionExpiration) *noncurrentVersionExpiration { +func buildNoncurrentVersionExpirationFromAPIResponse(exp *objstorage.NoncurrentVersionExpiration) *noncurrentVersionExpiration { if exp == nil { return nil } @@ -149,7 +149,7 @@ func buildNoncurrentVersionExpirationFromAPIResponse(exp *s3.NoncurrentVersionEx } } -func buildAbortIncompleteMultipartUploadFromAPIResponse(abort *s3.AbortIncompleteMultipartUpload) *abortIncompleteMultipartUpload { +func buildAbortIncompleteMultipartUploadFromAPIResponse(abort *objstorage.AbortIncompleteMultipartUpload) *abortIncompleteMultipartUpload { if abort == nil { return nil } @@ -159,23 +159,23 @@ func buildAbortIncompleteMultipartUploadFromAPIResponse(abort *s3.AbortIncomplet } } -func buildBucketLifecycleConfigurationFromModel(data *BucketLifecycleConfigurationModel) s3.PutBucketLifecycleRequest { - return s3.PutBucketLifecycleRequest{ +func buildBucketLifecycleConfigurationFromModel(data *BucketLifecycleConfigurationModel) objstorage.PutBucketLifecycleRequest { + return objstorage.PutBucketLifecycleRequest{ Rules: buildRulesFromModel(data.Rule), } } 
-func buildRulesFromModel(rules []lifecycleRule) *[]s3.Rule { +func buildRulesFromModel(rules []lifecycleRule) *[]objstorage.Rule { if rules == nil { return nil } - result := make([]s3.Rule, 0, len(rules)) + result := make([]objstorage.Rule, 0, len(rules)) for _, r := range rules { - result = append(result, s3.Rule{ + result = append(result, objstorage.Rule{ ID: r.ID.ValueStringPointer(), Prefix: r.Prefix.ValueStringPointer(), - Status: s3.ExpirationStatus(r.Status.ValueString()).Ptr(), + Status: objstorage.ExpirationStatus(r.Status.ValueString()).Ptr(), Expiration: buildExpirationFromModel(r.Expiration), NoncurrentVersionExpiration: buildNoncurrentVersionExpirationFromModel(r.NoncurrentVersionExpiration), AbortIncompleteMultipartUpload: buildAbortIncompleteMultipartUploadFromModel(r.AbortIncompleteMultipartUpload), @@ -185,34 +185,34 @@ func buildRulesFromModel(rules []lifecycleRule) *[]s3.Rule { return &result } -func buildExpirationFromModel(expiration *expiration) *s3.LifecycleExpiration { +func buildExpirationFromModel(expiration *expiration) *objstorage.LifecycleExpiration { if expiration == nil { return nil } - return &s3.LifecycleExpiration{ + return &objstorage.LifecycleExpiration{ Days: convptr.Int64ToInt32(expiration.Days.ValueInt64Pointer()), Date: expiration.Date.ValueStringPointer(), ExpiredObjectDeleteMarker: expiration.ExpiredObjectDeleteMarker.ValueBoolPointer(), } } -func buildNoncurrentVersionExpirationFromModel(expiration *noncurrentVersionExpiration) *s3.NoncurrentVersionExpiration { +func buildNoncurrentVersionExpirationFromModel(expiration *noncurrentVersionExpiration) *objstorage.NoncurrentVersionExpiration { if expiration == nil { return nil } - return &s3.NoncurrentVersionExpiration{ + return &objstorage.NoncurrentVersionExpiration{ NoncurrentDays: convptr.Int64ToInt32(expiration.NoncurrentDays.ValueInt64Pointer()), } } -func buildAbortIncompleteMultipartUploadFromModel(abort *abortIncompleteMultipartUpload) 
*s3.AbortIncompleteMultipartUpload { +func buildAbortIncompleteMultipartUploadFromModel(abort *abortIncompleteMultipartUpload) *objstorage.AbortIncompleteMultipartUpload { if abort == nil { return nil } - return &s3.AbortIncompleteMultipartUpload{ + return &objstorage.AbortIncompleteMultipartUpload{ DaysAfterInitiation: convptr.Int64ToInt32(abort.DaysAfterInitiation.ValueInt64Pointer()), } } diff --git a/services/s3/object.go b/services/objectstorage/object.go similarity index 93% rename from services/s3/object.go rename to services/objectstorage/object.go index 1e10bdb93..405c03aab 100644 --- a/services/s3/object.go +++ b/services/objectstorage/object.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -14,7 +14,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" "github.com/mitchellh/go-homedir" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/tags" @@ -83,7 +83,7 @@ type ObjectDataSourceModel struct { } // UploadObject uploads an object to a bucket. -func (c *Client) UploadObject(ctx context.Context, data *ObjectResourceModel) (*s3.APIResponse, error) { +func (c *Client) UploadObject(ctx context.Context, data *ObjectResourceModel) (*objstorage.APIResponse, error) { putReq := c.client.ObjectsApi.PutObject(ctx, data.Bucket.ValueString(), data.Key.ValueString()) err := fillPutObjectRequest(&putReq, data) if err != nil { @@ -187,7 +187,7 @@ func (c *Client) UpdateObject(ctx context.Context, plan, state *ObjectResourceMo func (c *Client) DeleteObject(ctx context.Context, data *ObjectResourceModel) error { var ( err error - resp *s3.APIResponse + resp *objstorage.APIResponse ) if !data.VersionID.IsNull() { @@ -208,7 +208,7 @@ func (c *Client) DeleteObject(ctx context.Context, data *ObjectResourceModel) er } // SetObjectComputedAttributes sets computed attributes for an object. 
-func (c *Client) SetObjectComputedAttributes(ctx context.Context, data *ObjectResourceModel, apiResponse *s3.APIResponse) error { +func (c *Client) SetObjectComputedAttributes(ctx context.Context, data *ObjectResourceModel, apiResponse *objstorage.APIResponse) error { contentType := apiResponse.Header.Get("Content-Type") if contentType != "" { data.ContentType = types.StringValue(contentType) @@ -221,6 +221,8 @@ func (c *Client) SetObjectComputedAttributes(ctx context.Context, data *ObjectRe data.Etag = types.StringValue(strings.Trim(etag, "\"")) } + data.ServerSideEncryption = types.StringValue(apiResponse.Header.Get("x-amz-server-side-encryption")) + contentType, err := c.getContentType(ctx, &objectFindRequest{ Bucket: data.Bucket, Key: data.Key, @@ -244,7 +246,7 @@ func (c *Client) SetObjectComputedAttributes(ctx context.Context, data *ObjectRe func (c *Client) updateObjectLock(ctx context.Context, plan, state *ObjectResourceModel) error { if !plan.ObjectLockLegalHold.Equal(state.ObjectLockLegalHold) { _, err := c.client.ObjectLockApi.PutObjectLegalHold(ctx, state.Bucket.ValueString(), state.Key.ValueString()). - ObjectLegalHoldConfiguration(s3.ObjectLegalHoldConfiguration{Status: plan.ObjectLockLegalHold.ValueStringPointer()}). + ObjectLegalHoldConfiguration(objstorage.ObjectLegalHoldConfiguration{Status: plan.ObjectLockLegalHold.ValueStringPointer()}). 
Execute() if err != nil { return fmt.Errorf("failed to update object lock legal hold: %w", err) @@ -260,14 +262,14 @@ func (c *Client) updateObjectLock(ctx context.Context, plan, state *ObjectResour return nil } -func (c *Client) putRetention(ctx context.Context, plan, state *ObjectResourceModel) (*s3.APIResponse, error) { +func (c *Client) putRetention(ctx context.Context, plan, state *ObjectResourceModel) (*objstorage.APIResponse, error) { retentionDate, err := getRetentionDate(plan.ObjectLockRetainUntilDate) if err != nil { return nil, fmt.Errorf("failed to parse object lock retain until date: %w", err) } baseReq := c.client.ObjectLockApi.PutObjectRetention(ctx, state.Bucket.ValueString(), state.Key.ValueString()). - PutObjectRetentionRequest(s3.PutObjectRetentionRequest{ + PutObjectRetentionRequest(objstorage.PutObjectRetentionRequest{ Mode: plan.ObjectLockMode.ValueStringPointer(), RetainUntilDate: &retentionDate, }) @@ -285,7 +287,7 @@ func (c *Client) putRetention(ctx context.Context, plan, state *ObjectResourceMo return baseReq.Execute() } -func (c *Client) setObjectCommonAttributes(ctx context.Context, data *ObjectResourceModel, apiResponse *s3.APIResponse) error { +func (c *Client) setObjectCommonAttributes(ctx context.Context, data *ObjectResourceModel, apiResponse *objstorage.APIResponse) error { setContentData(data, apiResponse) setServerSideEncryptionData(data, apiResponse) if err := setObjectLockData(data, apiResponse); err != nil { @@ -333,7 +335,7 @@ func (c *Client) getTags(ctx context.Context, bucket, key string) (types.Map, er return tagsMap, nil } -func (c *Client) setObjectModelData(ctx context.Context, apiResponse *s3.APIResponse, data *ObjectResourceModel) error { +func (c *Client) setObjectModelData(ctx context.Context, apiResponse *objstorage.APIResponse, data *ObjectResourceModel) error { if err := c.setObjectCommonAttributes(ctx, data, apiResponse); err != nil { return err } @@ -396,7 +398,7 @@ type objectFindRequest struct { 
ServerSideEncryptionCustomerKeyMD5 types.String } -func (c *Client) findObject(ctx context.Context, data *objectFindRequest) (*s3.HeadObjectOutput, *s3.APIResponse, error) { +func (c *Client) findObject(ctx context.Context, data *objectFindRequest) (*objstorage.HeadObjectOutput, *objstorage.APIResponse, error) { req := c.client.ObjectsApi.HeadObject(ctx, data.Bucket.ValueString(), data.Key.ValueString()) if !data.Etag.IsNull() { req = req.IfMatch(data.Etag.ValueString()) @@ -444,16 +446,16 @@ func getRetentionDate(d types.String) (string, error) { return t.UTC().Format(time.RFC3339), nil } -func expandObjectDate(v string) *s3.IonosTime { +func expandObjectDate(v string) *objstorage.IonosTime { t, err := time.Parse(time.RFC3339, v) if err != nil { - return &s3.IonosTime{Time: time.Time{}.UTC()} + return &objstorage.IonosTime{Time: time.Time{}.UTC()} } - return &s3.IonosTime{Time: t.UTC()} + return &objstorage.IonosTime{Time: t.UTC()} } -func setContentData(data *ObjectResourceModel, apiResponse *s3.APIResponse) { +func setContentData(data *ObjectResourceModel, apiResponse *objstorage.APIResponse) { contentType := apiResponse.Header.Get("Content-Type") if contentType != "" { data.ContentType = types.StringValue(contentType) @@ -492,7 +494,7 @@ func setContentData(data *ObjectResourceModel, apiResponse *s3.APIResponse) { } } -func setServerSideEncryptionData(data *ObjectResourceModel, apiResponse *s3.APIResponse) { +func setServerSideEncryptionData(data *ObjectResourceModel, apiResponse *objstorage.APIResponse) { serverSideEncryption := apiResponse.Header.Get("x-amz-server-side-encryption") if serverSideEncryption != "" { data.ServerSideEncryption = types.StringValue(serverSideEncryption) @@ -515,7 +517,7 @@ func setServerSideEncryptionData(data *ObjectResourceModel, apiResponse *s3.APIR } -func setObjectLockData(data *ObjectResourceModel, apiResponse *s3.APIResponse) error { +func setObjectLockData(data *ObjectResourceModel, apiResponse *objstorage.APIResponse) error 
{ objectLockMode := apiResponse.Header.Get("x-amz-object-lock-mode") if objectLockMode != "" { data.ObjectLockMode = types.StringValue(objectLockMode) @@ -539,7 +541,7 @@ func setObjectLockData(data *ObjectResourceModel, apiResponse *s3.APIResponse) e return nil } -func getMetadataFromAPIResponse(ctx context.Context, apiResponse *s3.APIResponse) (types.Map, error) { +func getMetadataFromAPIResponse(ctx context.Context, apiResponse *objstorage.APIResponse) (types.Map, error) { metadataMap := getMetadataMapFromHeaders(apiResponse, "X-Amz-Meta-") if len(metadataMap) > 0 { @@ -554,7 +556,7 @@ func getMetadataFromAPIResponse(ctx context.Context, apiResponse *s3.APIResponse return types.MapNull(types.StringType), nil } -func getMetadataMapFromHeaders(apiResponse *s3.APIResponse, prefix string) map[string]string { +func getMetadataMapFromHeaders(apiResponse *objstorage.APIResponse, prefix string) map[string]string { metaHeaders := map[string]string{} for name, values := range apiResponse.Header { if strings.HasPrefix(strings.ToLower(name), strings.ToLower(prefix)) { @@ -585,7 +587,7 @@ func (c *Client) getContentType(ctx context.Context, data *objectFindRequest) (s return apiResponse.Header.Get("Content-Type"), nil } -func deleteObjectByModel(ctx context.Context, client *s3.APIClient, data *ObjectResourceModel) (map[string]interface{}, *s3.APIResponse, error) { +func deleteObjectByModel(ctx context.Context, client *objstorage.APIClient, data *ObjectResourceModel) (map[string]interface{}, *objstorage.APIResponse, error) { req := client.ObjectsApi.DeleteObject(ctx, data.Bucket.ValueString(), data.Key.ValueString()) if !data.VersionID.IsNull() { req = req.VersionId(data.VersionID.ValueString()) @@ -629,7 +631,7 @@ func getBody(data *ObjectResourceModel) (*os.File, error) { return nil, nil } -func fillContentData(data *ObjectResourceModel, req *s3.ApiPutObjectRequest) error { +func fillContentData(data *ObjectResourceModel, req *objstorage.ApiPutObjectRequest) error { if 
!data.CacheControl.IsNull() { *req = req.CacheControl(data.CacheControl.ValueString()) } @@ -662,7 +664,7 @@ func fillContentData(data *ObjectResourceModel, req *s3.ApiPutObjectRequest) err return nil } -func fillServerSideEncryptionData(data *ObjectResourceModel, req *s3.ApiPutObjectRequest) { +func fillServerSideEncryptionData(data *ObjectResourceModel, req *objstorage.ApiPutObjectRequest) { if !data.ServerSideEncryption.IsNull() { *req = req.XAmzServerSideEncryption(data.ServerSideEncryption.ValueString()) } @@ -684,7 +686,7 @@ func fillServerSideEncryptionData(data *ObjectResourceModel, req *s3.ApiPutObjec } } -func fillObjectLockData(data *ObjectResourceModel, req *s3.ApiPutObjectRequest) error { +func fillObjectLockData(data *ObjectResourceModel, req *objstorage.ApiPutObjectRequest) error { if !data.ObjectLockMode.IsNull() { *req = req.XAmzObjectLockMode(data.ObjectLockMode.ValueString()) } @@ -705,7 +707,7 @@ func fillObjectLockData(data *ObjectResourceModel, req *s3.ApiPutObjectRequest) return nil } -func fillPutObjectRequest(req *s3.ApiPutObjectRequest, data *ObjectResourceModel) error { +func fillPutObjectRequest(req *objstorage.ApiPutObjectRequest, data *ObjectResourceModel) error { fillServerSideEncryptionData(data, req) if err := fillContentData(data, req); err != nil { return err @@ -751,7 +753,7 @@ func needsMD5Header(data *ObjectResourceModel) bool { return !data.ObjectLockMode.IsNull() || !data.ObjectLockRetainUntilDate.IsNull() || !data.ObjectLockLegalHold.IsNull() } -func addMD5Header(req *s3.ApiPutObjectRequest, file io.ReadSeeker) error { +func addMD5Header(req *objstorage.ApiPutObjectRequest, file io.ReadSeeker) error { body, err := io.ReadAll(file) if err != nil { return fmt.Errorf("failed to read file content: %w", err) diff --git a/services/s3/object_copy.go b/services/objectstorage/object_copy.go similarity index 95% rename from services/s3/object_copy.go rename to services/objectstorage/object_copy.go index f4161146a..60b4e0f27 100644 --- 
a/services/s3/object_copy.go +++ b/services/objectstorage/object_copy.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -7,7 +7,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // ObjectCopyResourceModel defines the fields for the Terraform resource model. @@ -122,7 +122,7 @@ func (c *Client) UpdateObjectCopy(ctx context.Context, plan, state *ObjectCopyRe func (c *Client) DeleteObjectCopy(ctx context.Context, data *ObjectCopyResourceModel) error { var ( err error - resp *s3.APIResponse + resp *objstorage.APIResponse ) if !data.VersionID.IsNull() { @@ -142,7 +142,7 @@ func (c *Client) DeleteObjectCopy(ctx context.Context, data *ObjectCopyResourceM return err } -func (c *Client) setObjectCopyComputedAttributes(ctx context.Context, data *ObjectCopyResourceModel, apiResponse *s3.APIResponse, output *s3.CopyObjectResult) error { +func (c *Client) setObjectCopyComputedAttributes(ctx context.Context, data *ObjectCopyResourceModel, apiResponse *objstorage.APIResponse, output *objstorage.CopyObjectResult) error { contentType := apiResponse.Header.Get("Content-Type") if contentType != "" { data.ContentType = types.StringValue(contentType) @@ -174,7 +174,7 @@ func (c *Client) setObjectCopyComputedAttributes(ctx context.Context, data *Obje return nil } -func deleteObjectCopyByModel(ctx context.Context, client *s3.APIClient, data *ObjectCopyResourceModel) (map[string]interface{}, *s3.APIResponse, error) { +func deleteObjectCopyByModel(ctx context.Context, client *objstorage.APIClient, data *ObjectCopyResourceModel) (map[string]interface{}, *objstorage.APIResponse, error) { req := client.ObjectsApi.DeleteObject(ctx, data.Bucket.ValueString(), data.Key.ValueString()) if !data.VersionID.IsNull() { req = req.VersionId(data.VersionID.ValueString()) @@ -191,7 +191,7 @@ func hasCopyConditions(plan *ObjectCopyResourceModel) bool { return 
!plan.CopyIfMatch.IsNull() || !plan.CopyIfModifiedSince.IsNull() || !plan.CopyIfNoneMatch.IsNull() || !plan.CopyIfUnmodifiedSince.IsNull() } -func fillObjectCopyContentData(data *ObjectCopyResourceModel, req *s3.ApiCopyObjectRequest) error { +func fillObjectCopyContentData(data *ObjectCopyResourceModel, req *objstorage.ApiCopyObjectRequest) error { if !data.CacheControl.IsNull() { *req = req.CacheControl(data.CacheControl.ValueString()) } @@ -254,7 +254,7 @@ func fillObjectCopyContentData(data *ObjectCopyResourceModel, req *s3.ApiCopyObj return nil } -func fillObjectCopyServerSideEncryptionData(data *ObjectCopyResourceModel, req *s3.ApiCopyObjectRequest) { +func fillObjectCopyServerSideEncryptionData(data *ObjectCopyResourceModel, req *objstorage.ApiCopyObjectRequest) { if !data.ServerSideEncryption.IsNull() { *req = req.XAmzServerSideEncryption(data.ServerSideEncryption.ValueString()) } @@ -284,7 +284,7 @@ func fillObjectCopyServerSideEncryptionData(data *ObjectCopyResourceModel, req * } } -func fillObjectCopyLockData(data *ObjectCopyResourceModel, req *s3.ApiCopyObjectRequest) error { +func fillObjectCopyLockData(data *ObjectCopyResourceModel, req *objstorage.ApiCopyObjectRequest) error { if !data.ObjectLockMode.IsNull() { *req = req.XAmzObjectLockMode(data.ObjectLockMode.ValueString()) } @@ -305,7 +305,7 @@ func fillObjectCopyLockData(data *ObjectCopyResourceModel, req *s3.ApiCopyObject return nil } -func fillObjectCopyRequest(req *s3.ApiCopyObjectRequest, data *ObjectCopyResourceModel) error { +func fillObjectCopyRequest(req *objstorage.ApiCopyObjectRequest, data *ObjectCopyResourceModel) error { fillObjectCopyServerSideEncryptionData(data, req) if err := fillObjectCopyContentData(data, req); err != nil { return err @@ -376,7 +376,7 @@ func hasObjectCopyContentChanges(plan, state *ObjectCopyResourceModel) bool { return needsChange } -func (c *Client) setObjectCopyCommonAttributes(ctx context.Context, data *ObjectCopyResourceModel, apiResponse *s3.APIResponse) 
error { +func (c *Client) setObjectCopyCommonAttributes(ctx context.Context, data *ObjectCopyResourceModel, apiResponse *objstorage.APIResponse) error { setObjectCopyContentData(data, apiResponse) setObjectCopyServerSideEncryptionData(data, apiResponse) if err := setObjectCopyObjectLockData(data, apiResponse); err != nil { @@ -408,7 +408,7 @@ func (c *Client) setObjectCopyCommonAttributes(ctx context.Context, data *Object return nil } -func setObjectCopyContentData(data *ObjectCopyResourceModel, apiResponse *s3.APIResponse) { +func setObjectCopyContentData(data *ObjectCopyResourceModel, apiResponse *objstorage.APIResponse) { contentType := apiResponse.Header.Get("Content-Type") if contentType != "" { data.ContentType = types.StringValue(contentType) @@ -447,7 +447,7 @@ func setObjectCopyContentData(data *ObjectCopyResourceModel, apiResponse *s3.API } } -func setObjectCopyServerSideEncryptionData(data *ObjectCopyResourceModel, apiResponse *s3.APIResponse) { +func setObjectCopyServerSideEncryptionData(data *ObjectCopyResourceModel, apiResponse *objstorage.APIResponse) { serverSideEncryption := apiResponse.Header.Get("x-amz-server-side-encryption") if serverSideEncryption != "" { data.ServerSideEncryption = types.StringValue(serverSideEncryption) @@ -470,7 +470,7 @@ func setObjectCopyServerSideEncryptionData(data *ObjectCopyResourceModel, apiRes } -func setObjectCopyObjectLockData(data *ObjectCopyResourceModel, apiResponse *s3.APIResponse) error { +func setObjectCopyObjectLockData(data *ObjectCopyResourceModel, apiResponse *objstorage.APIResponse) error { objectLockMode := apiResponse.Header.Get("x-amz-object-lock-mode") if objectLockMode != "" { data.ObjectLockMode = types.StringValue(objectLockMode) diff --git a/services/s3/object_data_source.go b/services/objectstorage/object_data_source.go similarity index 93% rename from services/s3/object_data_source.go rename to services/objectstorage/object_data_source.go index c1de53035..7c0a6ba0a 100644 --- 
a/services/s3/object_data_source.go +++ b/services/objectstorage/object_data_source.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -8,7 +8,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // GetObjectForDataSource retrieves an object for a data source. @@ -37,7 +37,7 @@ func (c *Client) GetObjectForDataSource(ctx context.Context, data *ObjectDataSou return data, true, nil } -func (c *Client) setObjectDataSourceModelData(ctx context.Context, apiResponse *s3.APIResponse, data *ObjectDataSourceModel) error { +func (c *Client) setObjectDataSourceModelData(ctx context.Context, apiResponse *objstorage.APIResponse, data *ObjectDataSourceModel) error { if err := c.setObjectDataSourceCommonAttributes(ctx, data, apiResponse); err != nil { return err } @@ -63,7 +63,7 @@ func (c *Client) setObjectDataSourceModelData(ctx context.Context, apiResponse * return nil } -func (c *Client) setObjectDataSourceCommonAttributes(ctx context.Context, data *ObjectDataSourceModel, apiResponse *s3.APIResponse) error { +func (c *Client) setObjectDataSourceCommonAttributes(ctx context.Context, data *ObjectDataSourceModel, apiResponse *objstorage.APIResponse) error { setObjectDataSourceContentData(data, apiResponse) setObjectDataSourceServerSideEncryptionData(data, apiResponse) if err := setObjectDataSourceObjectLockData(data, apiResponse); err != nil { @@ -98,7 +98,7 @@ func (c *Client) setObjectDataSourceCommonAttributes(ctx context.Context, data * return nil } -func setObjectDataSourceContentData(data *ObjectDataSourceModel, apiResponse *s3.APIResponse) { +func setObjectDataSourceContentData(data *ObjectDataSourceModel, apiResponse *objstorage.APIResponse) { contentType := apiResponse.Header.Get("Content-Type") if contentType != "" { data.ContentType = types.StringValue(contentType) @@ -137,7 +137,7 @@ func setObjectDataSourceContentData(data 
*ObjectDataSourceModel, apiResponse *s3 } } -func setObjectDataSourceServerSideEncryptionData(data *ObjectDataSourceModel, apiResponse *s3.APIResponse) { +func setObjectDataSourceServerSideEncryptionData(data *ObjectDataSourceModel, apiResponse *objstorage.APIResponse) { serverSideEncryption := apiResponse.Header.Get("x-amz-server-side-encryption") if serverSideEncryption != "" { data.ServerSideEncryption = types.StringValue(serverSideEncryption) @@ -160,7 +160,7 @@ func setObjectDataSourceServerSideEncryptionData(data *ObjectDataSourceModel, ap } -func setObjectDataSourceObjectLockData(data *ObjectDataSourceModel, apiResponse *s3.APIResponse) error { +func setObjectDataSourceObjectLockData(data *ObjectDataSourceModel, apiResponse *objstorage.APIResponse) error { objectLockMode := apiResponse.Header.Get("x-amz-object-lock-mode") if objectLockMode != "" { data.ObjectLockMode = types.StringValue(objectLockMode) diff --git a/services/s3/object_delete.go b/services/objectstorage/object_delete.go similarity index 76% rename from services/s3/object_delete.go rename to services/objectstorage/object_delete.go index 24e792126..b8488e5da 100644 --- a/services/s3/object_delete.go +++ b/services/objectstorage/object_delete.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -7,10 +7,10 @@ import ( "github.com/ionos-cloud/sdk-go-bundle/shared" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) -// DeleteRequest represents a request to delete an object from an S3 general purpose bucket. +// DeleteRequest represents a request to delete an object from a general purpose bucket. type DeleteRequest struct { Bucket string Key string @@ -18,12 +18,12 @@ type DeleteRequest struct { ForceDestroy bool } -// DeleteAllObjectVersions deletes all versions of a specified key from an S3 general purpose bucket. +// DeleteAllObjectVersions deletes all versions of a specified key from a general purpose bucket. 
// If key is empty then all versions of all objects are deleted. -// Set `force` to `true` to override any S3 object lock protections on object lock enabled buckets. +// Set `force` to `true` to override any object lock protections on object lock enabled buckets. // Returns the number of objects deleted. // Use `emptyBucket` to delete all versions of all objects in a bucket. -func DeleteAllObjectVersions(ctx context.Context, client *s3.APIClient, req *DeleteRequest) (int, error) { +func DeleteAllObjectVersions(ctx context.Context, client *objstorage.APIClient, req *DeleteRequest) (int, error) { var ( objCount int lastErr error @@ -66,13 +66,13 @@ func DeleteAllObjectVersions(ctx context.Context, client *s3.APIClient, req *Del } if lastErr != nil { - return objCount, fmt.Errorf("deleting at least one S3 Object delete marker, last error: %w", lastErr) + return objCount, fmt.Errorf("deleting at least one object delete marker, last error: %w", lastErr) } return objCount, nil } -func deleteObject(ctx context.Context, client *s3.APIClient, req *DeleteRequest) (*s3.APIResponse, error) { +func deleteObject(ctx context.Context, client *objstorage.APIClient, req *DeleteRequest) (*objstorage.APIResponse, error) { r := client.ObjectsApi.DeleteObject(ctx, req.Bucket, req.Key) if req.VersionID != "" { r = r.VersionId(req.VersionID) @@ -86,7 +86,7 @@ func deleteObject(ctx context.Context, client *s3.APIClient, req *DeleteRequest) return apiResponse, err } -func deleteVersionsPage(ctx context.Context, client *s3.APIClient, versions *[]s3.ObjectVersion, bucket, key string, force bool) (int, error) { +func deleteVersionsPage(ctx context.Context, client *objstorage.APIClient, versions *[]objstorage.ObjectVersion, bucket, key string, force bool) (int, error) { var ( objCount int lastErr error @@ -145,7 +145,7 @@ func deleteVersionsPage(ctx context.Context, client *s3.APIClient, versions *[]s return objCount, lastErr } -func deleteMarkersPage(ctx context.Context, client *s3.APIClient, 
markers *[]s3.DeleteMarkerEntry, bucket, key string) (int, error) { +func deleteMarkersPage(ctx context.Context, client *objstorage.APIClient, markers *[]objstorage.DeleteMarkerEntry, bucket, key string) (int, error) { var ( objCount int lastErr error @@ -178,7 +178,7 @@ func deleteMarkersPage(ctx context.Context, client *s3.APIClient, markers *[]s3. } -func tryDisableLegalHold(ctx context.Context, client *s3.APIClient, bucket, key, versionID string) (bool, error) { +func tryDisableLegalHold(ctx context.Context, client *objstorage.APIClient, bucket, key, versionID string) (bool, error) { output, _, err := client.ObjectLockApi.GetObjectLegalHold(ctx, bucket, key).VersionId(versionID).Execute() if err != nil { return false, err @@ -189,7 +189,7 @@ func tryDisableLegalHold(ctx context.Context, client *s3.APIClient, bucket, key, } _, err = client.ObjectLockApi.PutObjectLegalHold(ctx, bucket, key).VersionId(versionID). - ObjectLegalHoldConfiguration(s3.ObjectLegalHoldConfiguration{ + ObjectLegalHoldConfiguration(objstorage.ObjectLegalHoldConfiguration{ Status: shared.ToPtr("OFF"), }).Execute() @@ -200,6 +200,6 @@ func tryDisableLegalHold(ctx context.Context, client *s3.APIClient, bucket, key, return true, nil } -func httpForbidden(response *s3.APIResponse) bool { +func httpForbidden(response *objstorage.APIResponse) bool { return response != nil && response.Response != nil && response.StatusCode == http.StatusForbidden } diff --git a/services/s3/object_lock.go b/services/objectstorage/object_lock.go similarity index 89% rename from services/s3/object_lock.go rename to services/objectstorage/object_lock.go index d11afb087..7396dc405 100644 --- a/services/s3/object_lock.go +++ b/services/objectstorage/object_lock.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -7,7 +7,7 @@ import ( "fmt" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" convptr 
"github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/convptr" hash2 "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/hash" @@ -100,7 +100,7 @@ func (c *Client) UpdateObjectLock(ctx context.Context, data *ObjectLockConfigura return nil } -func buildObjectLockConfigurationModelFromAPIResponse(output *s3.GetObjectLockConfigurationOutput, data *ObjectLockConfigurationModel) *ObjectLockConfigurationModel { +func buildObjectLockConfigurationModelFromAPIResponse(output *objstorage.GetObjectLockConfigurationOutput, data *ObjectLockConfigurationModel) *ObjectLockConfigurationModel { built := &ObjectLockConfigurationModel{ Bucket: data.Bucket, ObjectLockEnabled: types.StringPointerValue(output.ObjectLockEnabled), @@ -118,11 +118,11 @@ func buildObjectLockConfigurationModelFromAPIResponse(output *s3.GetObjectLockCo return built } -func buildObjectLockConfigurationFromModel(data *ObjectLockConfigurationModel) s3.PutObjectLockConfigurationRequest { - req := s3.PutObjectLockConfigurationRequest{ +func buildObjectLockConfigurationFromModel(data *ObjectLockConfigurationModel) objstorage.PutObjectLockConfigurationRequest { + req := objstorage.PutObjectLockConfigurationRequest{ ObjectLockEnabled: data.ObjectLockEnabled.ValueStringPointer(), - Rule: &s3.PutObjectLockConfigurationRequestRule{ - DefaultRetention: &s3.DefaultRetention{ + Rule: &objstorage.PutObjectLockConfigurationRequestRule{ + DefaultRetention: &objstorage.DefaultRetention{ Mode: data.Rule.DefaultRetention.Mode.ValueStringPointer(), Days: convptr.Int64ToInt32(data.Rule.DefaultRetention.Days.ValueInt64Pointer()), Years: convptr.Int64ToInt32(data.Rule.DefaultRetention.Years.ValueInt64Pointer()), diff --git a/services/s3/objects.go b/services/objectstorage/objects.go similarity index 99% rename from services/s3/objects.go rename to services/objectstorage/objects.go index 4b60dde55..417262e7d 100644 --- a/services/s3/objects.go +++ b/services/objectstorage/objects.go @@ -1,4 +1,4 @@ -package s3 
+package objectstorage import ( "context" diff --git a/services/s3/objects_paginator.go b/services/objectstorage/objects_paginator.go similarity index 87% rename from services/s3/objects_paginator.go rename to services/objectstorage/objects_paginator.go index 6e99e5a37..f2dbe56da 100644 --- a/services/s3/objects_paginator.go +++ b/services/objectstorage/objects_paginator.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // ListObjectsV2Input represents the input for the ListObjectsV2Paginator. @@ -19,15 +19,15 @@ type ListObjectsV2Input struct { MaxKeys *int32 } -// ListObjectsV2Paginator is a paginator for listing objects from S3 bucket +// ListObjectsV2Paginator is a paginator for listing objects from a bucket type ListObjectsV2Paginator struct { - client *s3.APIClient + client *objstorage.APIClient input *ListObjectsV2Input hasMore bool } // NewListObjectsV2Paginator creates a new ListObjectsV2Paginator. -func NewListObjectsV2Paginator(client *s3.APIClient, input *ListObjectsV2Input) *ListObjectsV2Paginator { +func NewListObjectsV2Paginator(client *objstorage.APIClient, input *ListObjectsV2Input) *ListObjectsV2Paginator { if input.MaxKeys == nil { defaultValue := int32(1000) input.MaxKeys = &defaultValue @@ -46,7 +46,7 @@ func (p *ListObjectsV2Paginator) HasMorePages() bool { } // NextPage retrieves the next page of objects.
-func (p *ListObjectsV2Paginator) NextPage(ctx context.Context) (*s3.ListBucketResultV2, error) { +func (p *ListObjectsV2Paginator) NextPage(ctx context.Context) (*objstorage.ListBucketResultV2, error) { if !p.hasMore { return nil, fmt.Errorf("no more pages") } diff --git a/services/s3/paginator.go b/services/objectstorage/paginator.go similarity index 83% rename from services/s3/paginator.go rename to services/objectstorage/paginator.go index 69d63ba30..9781ba161 100644 --- a/services/s3/paginator.go +++ b/services/objectstorage/paginator.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // ListObjectVersionsInput represents the input for the ListObjectVersionsPaginator. @@ -14,9 +14,9 @@ type ListObjectVersionsInput struct { MaxKeys int } -// ListObjectVersionsPaginator is a paginator for listing object versions in an S3 bucket. +// ListObjectVersionsPaginator is a paginator for listing object versions in a bucket. type ListObjectVersionsPaginator struct { - client *s3.APIClient + client *objstorage.APIClient input *ListObjectVersionsInput keyMarker *string versionIDMarker *string @@ -24,7 +24,7 @@ type ListObjectVersionsPaginator struct { } // NewListObjectVersionsPaginator creates a new ListObjectVersionsPaginator. -func NewListObjectVersionsPaginator(client *s3.APIClient, input *ListObjectVersionsInput) *ListObjectVersionsPaginator { +func NewListObjectVersionsPaginator(client *objstorage.APIClient, input *ListObjectVersionsInput) *ListObjectVersionsPaginator { if input.MaxKeys == 0 { input.MaxKeys = 1000 } @@ -42,7 +42,7 @@ func (p *ListObjectVersionsPaginator) HasMorePages() bool { } // NextPage retrieves the next page of object versions. 
-func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context) (*s3.ListObjectVersionsOutput, error) { +func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context) (*objstorage.ListObjectVersionsOutput, error) { if !p.hasMore { return nil, fmt.Errorf("no more pages") } diff --git a/services/s3/policy.go b/services/objectstorage/policy.go similarity index 86% rename from services/s3/policy.go rename to services/objectstorage/policy.go index 40b76a7c0..275a4b158 100644 --- a/services/s3/policy.go +++ b/services/objectstorage/policy.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -11,14 +11,14 @@ import ( "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/utils/constant" ) // ErrBucketPolicyNotFound returned for 404 -var ErrBucketPolicyNotFound = errors.New("s3 bucket policy not found") +var ErrBucketPolicyNotFound = errors.New("bucket policy not found") // BucketPolicyModel is used to create, update and delete a bucket policy. type BucketPolicyModel struct { @@ -133,7 +133,7 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, bucketName types.String } // GetBucketPolicyCheck gets a bucket policy. 
-func (c *Client) GetBucketPolicyCheck(ctx context.Context, bucketName string) (*s3.BucketPolicy, error) { +func (c *Client) GetBucketPolicyCheck(ctx context.Context, bucketName string) (*objstorage.BucketPolicy, error) { policy, apiResponse, err := c.client.PolicyApi.GetBucketPolicy(ctx, bucketName).Execute() if err != nil { if apiResponse.HttpNotFound() { @@ -144,7 +144,7 @@ func (c *Client) GetBucketPolicyCheck(ctx context.Context, bucketName string) (* return policy, nil } -func setBucketPolicyData(policyResponse *s3.BucketPolicy, data *BucketPolicyModel) diag.Diagnostics { +func setBucketPolicyData(policyResponse *objstorage.BucketPolicy, data *BucketPolicyModel) diag.Diagnostics { var diags diag.Diagnostics policyData := bucketPolicy{ @@ -194,55 +194,55 @@ func setBucketPolicyData(policyResponse *s3.BucketPolicy, data *BucketPolicyMode return diags } -func buildBucketPolicyFromModel(policyModel *BucketPolicyModel) (s3.BucketPolicy, diag.Diagnostics) { +func buildBucketPolicyFromModel(policyModel *BucketPolicyModel) (objstorage.BucketPolicy, diag.Diagnostics) { var diags diag.Diagnostics - policyInput := s3.BucketPolicy{} + policyInput := objstorage.BucketPolicy{} policyData := bucketPolicy{} // Can't unmarshal directly in the API object, need to use an intermediary if diags = policyModel.Policy.Unmarshal(&policyData); diags.HasError() { - return s3.BucketPolicy{}, diags + return objstorage.BucketPolicy{}, diags } policyInput.Id = policyData.ID policyInput.Version = policyData.Version - statement := make([]s3.BucketPolicyStatement, 0, len(policyData.Statement)) + statement := make([]objstorage.BucketPolicyStatement, 0, len(policyData.Statement)) for _, statementData := range policyData.Statement { - statementInput := s3.NewBucketPolicyStatement(statementData.Action, statementData.Effect, statementData.Resources) + statementInput := objstorage.NewBucketPolicyStatement(statementData.Action, statementData.Effect, statementData.Resources) statementInput.Sid = 
statementData.SID - statementInput.Principal = s3.NewPrincipal(statementData.Principal) + statementInput.Principal = objstorage.NewPrincipal(statementData.Principal) if statementData.Condition != nil { - statementInput.Condition = s3.NewBucketPolicyCondition() + statementInput.Condition = objstorage.NewBucketPolicyCondition() if statementData.Condition.IPs != nil { - statementInput.Condition.IpAddress = s3.NewBucketPolicyConditionIpAddress() + statementInput.Condition.IpAddress = objstorage.NewBucketPolicyConditionIpAddress() ips := statementData.Condition.IPs statementInput.Condition.IpAddress.AwsSourceIp = &ips } if statementData.Condition.ExcludedIPs != nil { - statementInput.Condition.NotIpAddress = s3.NewBucketPolicyConditionIpAddress() + statementInput.Condition.NotIpAddress = objstorage.NewBucketPolicyConditionIpAddress() excludedIPs := statementData.Condition.ExcludedIPs statementInput.Condition.NotIpAddress.AwsSourceIp = &excludedIPs } if statementData.Condition.DateGreaterThan != nil { - var t *s3.IonosTime + var t *objstorage.IonosTime var err error if t, err = convertToIonosTime(*statementData.Condition.DateGreaterThan); err != nil { diags.AddError("Error converting policy condition 'greater than' date", err.Error()) - return s3.BucketPolicy{}, diags + return objstorage.BucketPolicy{}, diags } - dateGreater := s3.BucketPolicyConditionDate{AwsCurrentTime: t} + dateGreater := objstorage.BucketPolicyConditionDate{AwsCurrentTime: t} statementInput.Condition.DateGreaterThan = &dateGreater } if statementData.Condition.DateLessThan != nil { - var t *s3.IonosTime + var t *objstorage.IonosTime var err error if t, err = convertToIonosTime(*statementData.Condition.DateLessThan); err != nil { diags.AddError("Error converting policy condition 'less than' date", err.Error()) - return s3.BucketPolicy{}, diags + return objstorage.BucketPolicy{}, diags } - dateLess := s3.BucketPolicyConditionDate{AwsCurrentTime: t} + dateLess := 
objstorage.BucketPolicyConditionDate{AwsCurrentTime: t} statementInput.Condition.DateLessThan = &dateLess } } @@ -253,8 +253,8 @@ func buildBucketPolicyFromModel(policyModel *BucketPolicyModel) (s3.BucketPolicy return policyInput, diags } -func convertToIonosTime(targetTime string) (*s3.IonosTime, error) { - var ionosTime s3.IonosTime +func convertToIonosTime(targetTime string) (*objstorage.IonosTime, error) { + var ionosTime objstorage.IonosTime var convertedTime time.Time var err error diff --git a/services/s3/public_access_block.go b/services/objectstorage/public_access_block.go similarity index 89% rename from services/s3/public_access_block.go rename to services/objectstorage/public_access_block.go index c8af9b98e..846ad7738 100644 --- a/services/s3/public_access_block.go +++ b/services/objectstorage/public_access_block.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // BucketPublicAccessBlockResourceModel defines the expected inputs for creating a new BucketPublicAccessBlock. @@ -66,8 +66,8 @@ func (c *Client) DeleteBucketPublicAccessBlock(ctx context.Context, bucketName t } // GetBucketPublicAccessBlockCheck gets a BucketPublicAccessBlock. 
-func buildPublicAccessBlockFromModel(model *BucketPublicAccessBlockResourceModel) s3.BlockPublicAccessPayload { - input := s3.BlockPublicAccessPayload{ +func buildPublicAccessBlockFromModel(model *BucketPublicAccessBlockResourceModel) objstorage.BlockPublicAccessPayload { + input := objstorage.BlockPublicAccessPayload{ BlockPublicPolicy: model.BlockPublicPolicy.ValueBoolPointer(), IgnorePublicAcls: model.IgnorePublicACLS.ValueBoolPointer(), BlockPublicAcls: model.BlockPublicACLS.ValueBoolPointer(), @@ -76,7 +76,7 @@ func buildPublicAccessBlockFromModel(model *BucketPublicAccessBlockResourceModel return input } -func buildPublicAccessBlockModelFromAPIResponse(output *s3.BlockPublicAccessOutput, model *BucketPublicAccessBlockResourceModel) *BucketPublicAccessBlockResourceModel { +func buildPublicAccessBlockModelFromAPIResponse(output *objstorage.BlockPublicAccessOutput, model *BucketPublicAccessBlockResourceModel) *BucketPublicAccessBlockResourceModel { built := &BucketPublicAccessBlockResourceModel{ Bucket: model.Bucket, BlockPublicACLS: types.BoolPointerValue(output.BlockPublicAcls), diff --git a/services/s3/sse.go b/services/objectstorage/sse.go similarity index 79% rename from services/s3/sse.go rename to services/objectstorage/sse.go index ba909170b..6053c58de 100644 --- a/services/s3/sse.go +++ b/services/objectstorage/sse.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // ServerSideEncryptionConfigurationModel defines the expected inputs for creating a new ServerSideEncryptionConfiguration. 
@@ -68,14 +68,14 @@ func (c *Client) DeleteBucketSSE(ctx context.Context, bucketName types.String) e return err } -func buildServerSideEncryptionConfigurationModelFromAPIResponse(output *s3.ServerSideEncryptionConfiguration, data *ServerSideEncryptionConfigurationModel) *ServerSideEncryptionConfigurationModel { +func buildServerSideEncryptionConfigurationModelFromAPIResponse(output *objstorage.ServerSideEncryptionConfiguration, data *ServerSideEncryptionConfigurationModel) *ServerSideEncryptionConfigurationModel { return &ServerSideEncryptionConfigurationModel{ Bucket: data.Bucket, Rules: buildServerSideEncryptionRulesFromAPIResponse(output.Rules), } } -func buildServerSideEncryptionRulesFromAPIResponse(data *[]s3.ServerSideEncryptionRule) []sseRule { +func buildServerSideEncryptionRulesFromAPIResponse(data *[]objstorage.ServerSideEncryptionRule) []sseRule { if data == nil { return nil } @@ -100,18 +100,18 @@ func buildServerSideEncryptionRulesFromAPIResponse(data *[]s3.ServerSideEncrypti return rules } -func buildServerSideEncryptionConfigurationFromModel(data *ServerSideEncryptionConfigurationModel) s3.PutBucketEncryptionRequest { - return s3.PutBucketEncryptionRequest{ +func buildServerSideEncryptionConfigurationFromModel(data *ServerSideEncryptionConfigurationModel) objstorage.PutBucketEncryptionRequest { + return objstorage.PutBucketEncryptionRequest{ Rules: buildServerSideEncryptionRulesFromModel(data.Rules), } } -func buildServerSideEncryptionRulesFromModel(data []sseRule) *[]s3.ServerSideEncryptionRule { - rules := make([]s3.ServerSideEncryptionRule, 0, len(data)) +func buildServerSideEncryptionRulesFromModel(data []sseRule) *[]objstorage.ServerSideEncryptionRule { + rules := make([]objstorage.ServerSideEncryptionRule, 0, len(data)) for _, r := range data { - rules = append(rules, s3.ServerSideEncryptionRule{ - ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{ - SSEAlgorithm: 
s3.ServerSideEncryption(r.ApplyServerSideEncryptionByDefault.SSEAlgorithm.ValueString()).Ptr(), + rules = append(rules, objstorage.ServerSideEncryptionRule{ + ApplyServerSideEncryptionByDefault: &objstorage.ServerSideEncryptionByDefault{ + SSEAlgorithm: objstorage.ServerSideEncryption(r.ApplyServerSideEncryptionByDefault.SSEAlgorithm.ValueString()).Ptr(), }, }) } diff --git a/services/s3/tags.go b/services/objectstorage/tags.go similarity index 95% rename from services/s3/tags.go rename to services/objectstorage/tags.go index 8252ecdaf..92e730a75 100644 --- a/services/s3/tags.go +++ b/services/objectstorage/tags.go @@ -1,10 +1,10 @@ -package s3 +package objectstorage import ( "context" "fmt" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" "github.com/ionos-cloud/terraform-provider-ionoscloud/v6/internal/tags" ) @@ -29,7 +29,7 @@ func (c *Client) UpdateBucketTags(ctx context.Context, bucketName string, new, o tagsToKeep := allTags.Ignore(old).Ignore(new) if len(new)+len(tagsToKeep) > 0 { // The API overwrite the tags list every time, so we need to merge new and the ones we want to keep. if _, err = c.client.TaggingApi.PutBucketTagging(ctx, bucketName).PutBucketTaggingRequest( - s3.PutBucketTaggingRequest{ + objstorage.PutBucketTaggingRequest{ TagSet: new.Merge(tagsToKeep).ToListPointer(), }).Execute(); err != nil { return fmt.Errorf("failed to update bucket tags: %w", err) @@ -90,7 +90,7 @@ func (c *Client) UpdateObjectTags(ctx context.Context, bucketName, objectName st tagsToKeep := allTags.Ignore(old).Ignore(new) if len(new)+len(tagsToKeep) > 0 { // The API overwrite the tags list every time, so we need to merge new and the ones we want to keep. 
if _, _, err = c.client.TaggingApi.PutObjectTagging(ctx, bucketName, objectName).PutObjectTaggingRequest( - s3.PutObjectTaggingRequest{ + objstorage.PutObjectTaggingRequest{ TagSet: new.Merge(tagsToKeep).ToListPointer(), }).Execute(); err != nil { return fmt.Errorf("failed to update object tags: %w", err) diff --git a/services/s3/versioning.go b/services/objectstorage/versioning.go similarity index 79% rename from services/s3/versioning.go rename to services/objectstorage/versioning.go index e2bc66f58..00f856672 100644 --- a/services/s3/versioning.go +++ b/services/objectstorage/versioning.go @@ -1,4 +1,4 @@ -package s3 +package objectstorage import ( "context" @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // BucketVersioningResourceModel defines the expected inputs for creating a new BucketVersioning. @@ -69,14 +69,14 @@ func (c *Client) UpdateBucketVersioning(ctx context.Context, data *BucketVersion // DeleteBucketVersioning deletes a BucketVersioning. func (c *Client) DeleteBucketVersioning(ctx context.Context, data *BucketVersioningResourceModel) error { - // Removing S3 bucket versioning for un-versioned bucket from state - if data.VersioningConfiguration.Status.ValueString() == string(s3.BUCKETVERSIONINGSTATUS_SUSPENDED) { + // Removing bucket versioning for un-versioned bucket from state + if data.VersioningConfiguration.Status.ValueString() == string(objstorage.BUCKETVERSIONINGSTATUS_SUSPENDED) { return nil } _, err := c.client.VersioningApi.PutBucketVersioning(ctx, data.Bucket.ValueString()). 
- PutBucketVersioningRequest(s3.PutBucketVersioningRequest{ - Status: s3.BUCKETVERSIONINGSTATUS_SUSPENDED.Ptr(), + PutBucketVersioningRequest(objstorage.PutBucketVersioningRequest{ + Status: objstorage.BUCKETVERSIONINGSTATUS_SUSPENDED.Ptr(), }).Execute() if isInvalidStateBucketWithObjectLock(err) { return nil @@ -85,19 +85,19 @@ func (c *Client) DeleteBucketVersioning(ctx context.Context, data *BucketVersion return err } -func buildPutVersioningRequestFromModel(data *BucketVersioningResourceModel) s3.PutBucketVersioningRequest { - var request s3.PutBucketVersioningRequest +func buildPutVersioningRequestFromModel(data *BucketVersioningResourceModel) objstorage.PutBucketVersioningRequest { + var request objstorage.PutBucketVersioningRequest if !data.VersioningConfiguration.Status.IsNull() { - request.Status = s3.BucketVersioningStatus(data.VersioningConfiguration.Status.ValueString()).Ptr() + request.Status = objstorage.BucketVersioningStatus(data.VersioningConfiguration.Status.ValueString()).Ptr() } if !data.VersioningConfiguration.MfaDelete.IsNull() { - request.MfaDelete = s3.MfaDeleteStatus(data.VersioningConfiguration.MfaDelete.ValueString()).Ptr() + request.MfaDelete = objstorage.MfaDeleteStatus(data.VersioningConfiguration.MfaDelete.ValueString()).Ptr() } return request } -func buildBucketVersioningModelFromAPIResponse(output *s3.GetBucketVersioningOutput, bucket types.String) *BucketVersioningResourceModel { +func buildBucketVersioningModelFromAPIResponse(output *objstorage.GetBucketVersioningOutput, bucket types.String) *BucketVersioningResourceModel { var vc versioningConfiguration if output.Status != nil { vc.Status = types.StringValue(string(*output.Status)) diff --git a/services/s3/website.go b/services/objectstorage/website.go similarity index 87% rename from services/s3/website.go rename to services/objectstorage/website.go index 0bdc20ae9..bba8f35d8 100644 --- a/services/s3/website.go +++ b/services/objectstorage/website.go @@ -1,11 +1,11 @@ -package 
s3 +package objectstorage import ( "context" "fmt" "github.com/hashicorp/terraform-plugin-framework/types" - s3 "github.com/ionos-cloud/sdk-go-s3" + objstorage "github.com/ionos-cloud/sdk-go-s3" ) // BucketWebsiteConfigurationModel defines the expected inputs for creating a new BucketWebsiteConfiguration. @@ -94,7 +94,7 @@ func (c *Client) DeleteBucketWebsite(ctx context.Context, bucketName types.Strin return err } -func buildBucketWebsiteConfigurationModelFromAPIResponse(output *s3.GetBucketWebsiteOutput, bucket types.String) *BucketWebsiteConfigurationModel { +func buildBucketWebsiteConfigurationModelFromAPIResponse(output *objstorage.GetBucketWebsiteOutput, bucket types.String) *BucketWebsiteConfigurationModel { built := &BucketWebsiteConfigurationModel{ Bucket: bucket, } @@ -143,8 +143,8 @@ func buildBucketWebsiteConfigurationModelFromAPIResponse(output *s3.GetBucketWeb return built } -func buildBucketWebsiteConfigurationFromModel(data *BucketWebsiteConfigurationModel) s3.PutBucketWebsiteRequest { - return s3.PutBucketWebsiteRequest{ +func buildBucketWebsiteConfigurationFromModel(data *BucketWebsiteConfigurationModel) objstorage.PutBucketWebsiteRequest { + return objstorage.PutBucketWebsiteRequest{ IndexDocument: buildIndexDocumentFromModel(data.IndexDocument), ErrorDocument: buildErrorDocumentFromModel(data.ErrorDocument), RedirectAllRequestsTo: buildRedirectAllRequestsToFromModel(data.RedirectAllRequestsTo), @@ -152,53 +152,53 @@ func buildBucketWebsiteConfigurationFromModel(data *BucketWebsiteConfigurationMo } } -func buildIndexDocumentFromModel(data *indexDocument) *s3.IndexDocument { +func buildIndexDocumentFromModel(data *indexDocument) *objstorage.IndexDocument { if data == nil { return nil } - return &s3.IndexDocument{ + return &objstorage.IndexDocument{ Suffix: data.Suffix.ValueStringPointer(), } } -func buildErrorDocumentFromModel(data *errorDocument) *s3.ErrorDocument { +func buildErrorDocumentFromModel(data *errorDocument) *objstorage.ErrorDocument 
{ if data == nil { return nil } - return &s3.ErrorDocument{ + return &objstorage.ErrorDocument{ Key: data.Key.ValueStringPointer(), } } -func buildRedirectAllRequestsToFromModel(data *redirectAllRequestsTo) *s3.RedirectAllRequestsTo { +func buildRedirectAllRequestsToFromModel(data *redirectAllRequestsTo) *objstorage.RedirectAllRequestsTo { if data == nil { return nil } - return &s3.RedirectAllRequestsTo{ + return &objstorage.RedirectAllRequestsTo{ HostName: data.HostName.ValueStringPointer(), Protocol: data.Protocol.ValueStringPointer(), } } -func buildRoutingRulesFromModel(data []routingRule) *[]s3.RoutingRule { +func buildRoutingRulesFromModel(data []routingRule) *[]objstorage.RoutingRule { if len(data) == 0 { return nil } - rules := make([]s3.RoutingRule, 0, len(data)) + rules := make([]objstorage.RoutingRule, 0, len(data)) for _, r := range data { - var rl s3.RoutingRule + var rl objstorage.RoutingRule if r.Condition != nil { - rl.Condition = &s3.RoutingRuleCondition{ + rl.Condition = &objstorage.RoutingRuleCondition{ HttpErrorCodeReturnedEquals: r.Condition.HTTPErrorCodeReturnedEquals.ValueStringPointer(), KeyPrefixEquals: r.Condition.KeyPrefixEquals.ValueStringPointer(), } } if r.Redirect != nil { - rl.Redirect = &s3.Redirect{ + rl.Redirect = &objstorage.Redirect{ HostName: r.Redirect.HostName.ValueStringPointer(), HttpRedirectCode: r.Redirect.HTTPRedirectCode.ValueStringPointer(), Protocol: r.Redirect.Protocol.ValueStringPointer(), diff --git a/services/s3/errors.go b/services/s3/errors.go deleted file mode 100644 index 6612620d2..000000000 --- a/services/s3/errors.go +++ /dev/null @@ -1,44 +0,0 @@ -package s3 - -import ( - "encoding/xml" - "errors" - "log" - - s3 "github.com/ionos-cloud/sdk-go-s3" -) - -func isBucketNotEmptyError(err error) bool { - var apiErr s3.GenericOpenAPIError - if errors.As(err, &apiErr) { - body := apiErr.Body() - var s3Err s3.Error - if err := xml.Unmarshal(body, &s3Err); err != nil { - log.Printf("failed to unmarshal error 
response: %v", err) - return false - } - - if s3Err.Code != nil && *s3Err.Code == "BucketNotEmpty" { - return true - } - } - return false -} - -func isInvalidStateBucketWithObjectLock(err error) bool { - var apiErr s3.GenericOpenAPIError - if errors.As(err, &apiErr) { - body := apiErr.Body() - var s3Err s3.Error - if err := xml.Unmarshal(body, &s3Err); err != nil { - log.Printf("failed to unmarshal error response: %v", err) - return false - } - - if s3Err.Code != nil && *s3Err.Code == "InvalidBucketState" && - s3Err.Message != nil && *s3Err.Message == "bucket versioning cannot be disabled on buckets with object lock enabled" { - return true - } - } - return false -}