From ccc52289b499bd7c66964a16ad9f3236b5911e66 Mon Sep 17 00:00:00 2001
From: shreyanshjain7174
Date: Mon, 4 Nov 2024 07:10:21 -0500
Subject: [PATCH] rgw/restore: s3tests to test restore object functionality.

These tests exercise temporary restore, permanent restore, and
read-through restore of cloud-transitioned objects, including the
zonegroup parameters and checks involved.

Signed-off-by: shreyanshjain7174
---
 s3tests.conf                         | 176 +++++++++++++++++++++++++++
 s3tests.conf.SAMPLE                  |   4 +
 s3tests_boto3/functional/__init__.py |  24 ++++
 s3tests_boto3/functional/test_s3.py  | 139 +++++++++++++++++++++
 4 files changed, 343 insertions(+)
 create mode 100644 s3tests.conf

diff --git a/s3tests.conf b/s3tests.conf
new file mode 100644
index 00000000..ec5c3001
--- /dev/null
+++ b/s3tests.conf
@@ -0,0 +1,176 @@
+[DEFAULT]
+## this section is just used for host, port and bucket_prefix
+
+# host set for rgw in vstart.sh
+host = localhost
+
+# port set for rgw in vstart.sh
+port = 8000
+
+## say "False" to disable TLS
+is_secure = False
+
+## say "False" to disable SSL Verify
+ssl_verify = False
+
+[fixtures]
+## all the buckets created will start with this prefix;
+## {random} will be filled with random characters to pad
+## the prefix to 30 characters long, and avoid collisions
+bucket prefix = yournamehere-{random}-
+
+# all the iam account resources (users, roles, etc) created
+# will start with this name prefix
+iam name prefix = s3-tests-
+
+# all the iam account resources (users, roles, etc) created
+# will start with this path prefix
+iam path prefix = /s3-tests/
+
+[s3 main]
+# main display_name set in vstart.sh
+display_name = M. Tester
+
+# main user_id set in vstart.sh
+user_id = testid
+
+# main email set in vstart.sh
+email = tester@ceph.com
+
+# zonegroup api_name for bucket location
+api_name = default
+
+## main AWS access key
+access_key = 0555b35654ad1656d804
+
+## main AWS secret key
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+
+## replace with key id obtained when secret is created, or delete if KMS not tested
+#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
+
+## Storage classes
+storage_classes = "LUKEWARM, FROZEN"
+
+## Lifecycle debug interval (default: 10)
+lc_debug_interval = 20
+## Restore debug interval (default: 100)
+rgw_restore_debug_interval = 60
+
+[s3 alt]
+# alt display_name set in vstart.sh
+display_name = john.doe
+## alt email set in vstart.sh
+email = john.doe@example.com
+
+# alt user_id set in vstart.sh
+user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234
+
+# alt AWS access key set in vstart.sh
+access_key = NOPQRSTUVWXYZABCDEFG
+
+# alt AWS secret key set in vstart.sh
+secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
+
+[s3 cloud]
+## used to run the testcases with the "cloud_transition" attribute for
+## transition and the "cloud_restore" attribute for restore.
+## Note: the waiting time may have to be tweaked depending on
+## the I/O latency to the cloud endpoint.
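+## As a rough guide (an assumption based on how lc_debug_interval is
+## used, not a documented guarantee): with lc_debug_interval = 20 a
+## Days=1 lifecycle rule fires within ~20s, and with
+## rgw_restore_debug_interval = 60 a RestoreRequest of Days=2 expires
+## after ~120s; the sleeps in the restore tests are sized against these.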
+
+## host set for cloud endpoint
+host = localhost
+
+## port set for cloud endpoint
+port = 8001
+
+## say "False" to disable TLS
+is_secure = False
+
+## cloud endpoint credentials
+access_key = 0555b35654ad1656d804
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+
+## storage class configured as cloud tier on local rgw server
+cloud_storage_class = CLOUDTIER
+
+## Below are optional:
+
+## config options for the cloud storage class configured above
+retain_head_object = true
+allow_read_through = true # enables restore on GET (read-through)
+read_through_restore_days = 2
+target_storage_class = Target_SC
+target_path = cloud-bucket
+
+## another regular storage class to test multiple transition rules
+storage_class = S1
+
+[s3 tenant]
+# tenant display_name set in vstart.sh
+display_name = testx$tenanteduser
+
+# tenant user_id set in vstart.sh
+user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+
+# tenant AWS access key set in vstart.sh
+access_key = HIJKLMNOPQRSTUVWXYZA
+
+# tenant AWS secret key set in vstart.sh
+secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab
+
+# tenant email set in vstart.sh
+email = tenanteduser@example.com
+
+# tenant name
+tenant = testx
+
+# the following section needs to be added for all sts-tests
+[iam]
+# used for iam operations in sts-tests
+# email from vstart.sh
+email = s3@example.com
+
+# user_id from vstart.sh
+user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+
+# access_key from vstart.sh
+access_key = ABCDEFGHIJKLMNOPQRST
+
+# secret_key from vstart.sh
+secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn
+
+# display_name from vstart.sh
+display_name = youruseridhere
+
+# iam account root user for iam_account tests
+[iam root]
+access_key = AAAAAAAAAAAAAAAAAAaa
+secret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+user_id = RGW11111111111111111
+email = account1@ceph.com
+
+# iam account root user in a different account than [iam root]
+[iam alt root]
+access_key = BBBBBBBBBBBBBBBBBBbb
+secret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+user_id = RGW22222222222222222
+email = account2@ceph.com
+
+# the following section needs to be added to run the AssumeRoleWithWebIdentity test
+[webidentity]
+# used for the assume role with web identity test in sts-tests
+# all parameters will be obtained from ceph/qa/tasks/keycloak.py
+token=
+
+aud=
+
+sub=
+
+azp=
+
+user_token=
+
+thumbprint=
+
+KC_REALM=
diff --git a/s3tests.conf.SAMPLE b/s3tests.conf.SAMPLE
index 3f0992aa..b2242299 100644
--- a/s3tests.conf.SAMPLE
+++ b/s3tests.conf.SAMPLE
@@ -54,6 +54,8 @@ secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
 
 ## Lifecycle debug interval (default: 10)
 #lc_debug_interval = 20
+## Restore debug interval (default: 100)
+#rgw_restore_debug_interval = 60
 
 [s3 alt]
 # alt display_name set in vstart.sh
@@ -95,6 +97,8 @@ secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
 
 ## Above configured cloud storage class config options
 # retain_head_object = false
+# allow_read_through = false # set to true to enable read-through
+# read_through_restore_days = 2
 # target_storage_class = Target_SC
 # target_path = cloud-bucket
 
diff --git a/s3tests_boto3/functional/__init__.py b/s3tests_boto3/functional/__init__.py
index 7ca874b4..43292b12 100644
--- a/s3tests_boto3/functional/__init__.py
+++ b/s3tests_boto3/functional/__init__.py
@@ -248,6 +248,11 @@ def configure():
     except (configparser.NoSectionError, configparser.NoOptionError):
         config.lc_debug_interval = 10
 
+    try:
+        config.rgw_restore_debug_interval = int(cfg.get('s3 main',"rgw_restore_debug_interval"))
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.rgw_restore_debug_interval = 100
+
     config.alt_access_key = cfg.get('s3 alt',"access_key")
     config.alt_secret_key = cfg.get('s3 alt',"secret_key")
     config.alt_display_name = cfg.get('s3 alt',"display_name")
@@ -375,6 +380,11 @@ def get_cloud_config(cfg):
     except (configparser.NoSectionError, configparser.NoOptionError):
         config.cloud_retain_head_object = None
 
+    try:
+        # coerce to bool so that "false" in the config is actually falsy
+        config.allow_read_through = cfg.getboolean('s3 cloud', "allow_read_through")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.allow_read_through = False
+
     try:
         config.cloud_target_path = cfg.get('s3 cloud',"target_path")
     except (configparser.NoSectionError, configparser.NoOptionError):
@@ -389,6 +399,11 @@ def get_cloud_config(cfg):
         config.cloud_regular_storage_class = cfg.get('s3 cloud', "storage_class")
     except (configparser.NoSectionError, configparser.NoOptionError):
         config.cloud_regular_storage_class = None
+
+    try:
+        config.read_through_restore_days = int(cfg.get('s3 cloud', "read_through_restore_days"))
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.read_through_restore_days = 10
 
 
 def get_client(client_config=None):
@@ -769,6 +784,9 @@ def get_cloud_storage_class():
 def get_cloud_retain_head_object():
     return config.cloud_retain_head_object
 
+def get_allow_read_through():
+    return config.allow_read_through
+
 def get_cloud_regular_storage_class():
     return config.cloud_regular_storage_class
 
@@ -780,3 +798,9 @@ def get_cloud_target_storage_class():
 
 def get_lc_debug_interval():
     return config.lc_debug_interval
+
+def get_restore_debug_interval():
+    return config.rgw_restore_debug_interval
+
+def get_read_through_days():
+    return config.read_through_restore_days
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
index e80afe64..5525f7c3 100644
--- a/s3tests_boto3/functional/test_s3.py
+++ b/s3tests_boto3/functional/test_s3.py
@@ -78,6 +78,7 @@
     get_svc_client,
     get_cloud_storage_class,
     get_cloud_retain_head_object,
+    get_allow_read_through,
     get_cloud_regular_storage_class,
     get_cloud_target_path,
     get_cloud_target_storage_class,
@@ -85,6 +86,8 @@
     nuke_prefixed_buckets,
     configured_storage_classes,
     get_lc_debug_interval,
+    get_restore_debug_interval,
+    get_read_through_days,
 )
 
 
@@ -9430,6 +9433,15 @@ def verify_object(client, bucket, key, content=None, sc=None):
         body = _get_body(response)
         assert body == content
 
+def verify_transition(client, bucket, key, sc=None):
+    response = client.head_object(Bucket=bucket, Key=key)
+
+    # head_object reports StorageClass only for non-STANDARD objects
+    if 'StorageClass' in response:
+        assert response['StorageClass'] == sc
+    else: # no StorageClass in the response implies STANDARD
+        assert 'STANDARD' == sc
+
 # The test harness for lifecycle is configured to treat days as 10 second intervals.
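+# Restore is assumed to use an analogous compression (mirroring the
+# lifecycle convention above): rgw_restore_debug_interval scales a restore
+# "day", so a temporary restore with Days=2 and
+# rgw_restore_debug_interval = 60 should expire within roughly 120s.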
 @pytest.mark.lifecycle
 @pytest.mark.lifecycle_transition
@@ -9727,6 +9739,133 @@ def test_lifecycle_cloud_transition_large_obj():
     expire1_key1_str = prefix + keys[1]
     verify_object(cloud_client, target_path, expire1_key1_str, data, target_sc)
 
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.cloud_restore
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_restore_object_temporary():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    key = 'test_restore_temp'
+    data = 'temporary restore data'
+
+    # Put object
+    client.put_object(Bucket=bucket, Key=key, Body=data)
+    verify_object(client, bucket, key, data)
+
+    # Transition object to cloud storage class
+    rules = [{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': '', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+    restore_interval = get_restore_debug_interval()
+    time.sleep(2 * lc_interval)
+
+    # Verify object is transitioned
+    verify_transition(client, bucket, key, cloud_sc)
+
+    # Restore object temporarily
+    client.restore_object(Bucket=bucket, Key=key, RestoreRequest={'Days': 2})
+
+    # Verify object is restored temporarily (head still reports the cloud
+    # storage class, but the content is readable again)
+    verify_transition(client, bucket, key, cloud_sc)
+    response = client.head_object(Bucket=bucket, Key=key)
+    assert response['ContentLength'] == len(data)
+    time.sleep(2 * (restore_interval + lc_interval))
+
+    # Verify the temporary restore has expired back to the cloud storage class
+    verify_object(client, bucket, key, data, cloud_sc)
+
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.cloud_restore
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_restore_object_permanent():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    key = 'test_restore_perm'
+    data = 'permanent restore data'
+
+    # Put object
+    client.put_object(Bucket=bucket, Key=key, Body=data)
+    verify_object(client, bucket, key, data)
+
+    # Transition object to cloud storage class
+    rules = [{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': '', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+    time.sleep(2 * lc_interval)
+
+    # Verify object is transitioned
+    verify_transition(client, bucket, key, cloud_sc)
+
+    # Restore object permanently (an empty RestoreRequest, no Days)
+    client.restore_object(Bucket=bucket, Key=key, RestoreRequest={})
+
+    # Verify object is restored permanently
+    verify_transition(client, bucket, key, 'STANDARD')
+    response = client.head_object(Bucket=bucket, Key=key)
+    assert response['ContentLength'] == len(data)
+
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.cloud_restore
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_read_through():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    key = 'test_restore_readthrough'
+    data = 'restore data with readthrough'
+
+    # Put object
+    client.put_object(Bucket=bucket, Key=key, Body=data)
+    verify_object(client, bucket, key, data)
+
+    # Transition object to cloud storage class
+    rules = [{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': '', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+    read_through_days = get_read_through_days()
+    time.sleep(2 * lc_interval)
+
+    # Check the storage class after transitioning
+    verify_transition(client, bucket, key, cloud_sc)
+
+    # Restore the object with a read-through GET request
+    allow_readthrough = get_allow_read_through()
+    if allow_readthrough:
+        response = client.get_object(Bucket=bucket, Key=key)
+        assert response['ContentLength'] == len(data)
+        time.sleep(2 * (read_through_days + lc_interval))
+
+        # Verify the read-through restore has expired back to the cloud tier
+        verify_object(client, bucket, key, sc=cloud_sc)
+    else:
+        # with read-through disabled, GET on a transitioned object is denied
+        e = assert_raises(ClientError, client.get_object, Bucket=bucket, Key=key)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+
 @pytest.mark.encryption
 @pytest.mark.fails_on_dbstore
 def test_encrypted_transfer_1b():