remove "direct" parameter
Luke McCrone authored and committed Jan 26, 2024
1 parent 23a4073 commit f6a3fe4
Showing 7 changed files with 15 additions and 62 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/system.yml
@@ -29,7 +29,7 @@ jobs:
       git clone https://github.com/bats-core/bats-core.git
       cd bats-core && ./install.sh $HOME
-      #- name: Install AWS
+      #- name: Install AWS (local only)
       #  if: ${{ env.GITHUB_ACTIONS_RUNNER }} == ''
       #  uses: chrislennon/[email protected]
 
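The renamed step is gated on `GITHUB_ACTIONS_RUNNER` being empty, i.e. it would only run when the workflow is exercised locally through act rather than on a hosted runner. A sketch of that local invocation (act reads `.secrets` by default; the `--secret-file` flag is shown explicitly for clarity):

```sh
# Run the system-test workflow locally; act picks up secrets from .secrets
act -W .github/workflows/system.yml --secret-file .secrets
```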
2 changes: 1 addition & 1 deletion tests/.env.default
@@ -2,4 +2,4 @@ AWS_REGION=us-west-2
 AWS_PROFILE=versity
 VERSITY_EXE=./versitygw
 BACKEND=posix
-DIRECT=0
+LOCAL_FOLDER=/tmp/gw
6 changes: 0 additions & 6 deletions tests/.env.s3

This file was deleted.

1 change: 0 additions & 1 deletion tests/.env.versitygw
@@ -2,5 +2,4 @@ AWS_REGION=us-east-1
 AWS_PROFILE=versity
 VERSITY_EXE=./versitygw
 BACKEND=posix
-DIRECT=0
 LOCAL_FOLDER=/tmp/gw
7 changes: 3 additions & 4 deletions tests/README.md
@@ -4,11 +4,10 @@ Instructions:
 1. Build the `versitygw` binary.
 2. Create a local AWS profile for connection to S3, and add the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` values above to the profile.
 3. Create an environment file (`.env`) similar to the ones in this folder, setting the `AWS_PROFILE` parameter to the name of the profile you created.
-4. Set the `DIRECT` parameter to `0` to communicate via versitygw, or `1` to communicate directly with S3.
-5. In the root repo folder, run with `VERSITYGW_TEST_ENV=<env file> tests/s3_bucket_tests.sh`.
-6. If running/testing the GitHub workflow, create a `.secrets` file, and set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` parameters here to the values of your AWS S3 IAM account.
+4. In the root repo folder, run with `VERSITYGW_TEST_ENV=<env file> tests/s3_bucket_tests.sh`.
+5. If running/testing the GitHub workflow locally, create a `.secrets` file, and set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` parameters here to the values of your AWS S3 IAM account.
 ```
 AWS_ACCESS_KEY_ID=<key_id>
 AWS_SECRET_ACCESS_KEY=<secret_key>
 ```
-7. To run the workflow locally, install **act** and run with `act -W .github/workflows/system.yml`.
+6. To run the workflow locally, install **act** and run with `act -W .github/workflows/system.yml`.
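With `DIRECT` gone, an environment file needs only the gateway settings. A minimal `.env` mirroring `tests/.env.default` after this commit (the values are examples to adapt):

```sh
AWS_REGION=us-west-2
AWS_PROFILE=versity      # name of the local AWS profile from step 2
VERSITY_EXE=./versitygw  # path to the binary built in step 1
BACKEND=posix
LOCAL_FOLDER=/tmp/gw     # backing directory for the posix backend
```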
48 changes: 8 additions & 40 deletions tests/s3_bucket_tests.sh
@@ -9,11 +9,7 @@ create_bucket() {
   fi
   local exit_code=0
   local error
-  if $direct; then
-    error=$(aws s3 mb s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  else
-    error=$(aws --endpoint-url http://127.0.0.1:7070 s3 mb s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  fi
+  error=$(aws --endpoint-url http://127.0.0.1:7070 s3 mb s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
   if [ $exit_code -ne 0 ]; then
     echo "error creating bucket: $error"
     return 1
@@ -28,11 +24,7 @@ delete_bucket() {
   fi
   local exit_code=0
   local error
-  if $direct; then
-    error=$(aws s3 rb s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
-  else
-    error=$(aws --endpoint-url http://127.0.0.1:7070 s3 rb s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
-  fi
+  error=$(aws --endpoint-url http://127.0.0.1:7070 s3 rb s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
   if [ $exit_code -ne 0 ]; then
     if [[ "$error" == *"The specified bucket does not exist"* ]]; then
       return 0
@@ -52,11 +44,7 @@ bucket_exists() {
   fi
   local exit_code=0
   local error
-  if $direct; then
-    error=$(aws s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
-  else
-    error=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
-  fi
+  error=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
   if [ $exit_code -ne 0 ]; then
     if [[ "$error" == *"The specified bucket does not exist"* ]] || [[ "$error" == *"Access Denied"* ]]; then
       return 1
@@ -98,11 +86,7 @@ object_exists() {
   fi
   local exit_code=0
   local error
-  if $direct; then
-    error=$(aws s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
-  else
-    error=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
-  fi
+  error=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code="$?"
   if [ $exit_code -ne 0 ]; then
     if [[ "$error" == "" ]]; then
       return 1
@@ -121,11 +105,7 @@ put_object() {
   fi
   local exit_code=0
   local error
-  if $direct; then
-    error=$(aws s3 cp "$1" s3://"$2" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  else
-    error=$(aws --endpoint-url http://127.0.0.1:7070 s3 cp "$1" s3://"$2" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  fi
+  error=$(aws --endpoint-url http://127.0.0.1:7070 s3 cp "$1" s3://"$2" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
   if [ $exit_code -ne 0 ]; then
     echo "error copying object to bucket: $error"
     return 1
@@ -160,11 +140,7 @@ delete_object() {
   fi
   local exit_code=0
   local error
-  if $direct; then
-    error=$(aws s3 rm s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  else
-    error=$(aws --endpoint-url http://127.0.0.1:7070 s3 rm s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  fi
+  error=$(aws --endpoint-url http://127.0.0.1:7070 s3 rm s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
   if [ $exit_code -ne 0 ]; then
     echo "error deleting object: $error"
     return 1
@@ -175,11 +151,7 @@
 list_buckets() {
   local exit_code=0
   local output
-  if $direct; then
-    output=$(aws s3 ls --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  else
-    output=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  fi
+  output=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
   if [ $exit_code -ne 0 ]; then
     echo "error listing buckets: $output"
     return 1
@@ -201,11 +173,7 @@ list_objects() {
   fi
   local exit_code=0
   local output
-  if $direct; then
-    output=$(aws s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  else
-    output=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
-  fi
+  output=$(aws --endpoint-url http://127.0.0.1:7070 s3 ls s3://"$1" --region "$AWS_REGION" --profile "$AWS_PROFILE" 2>&1) || exit_code=$?
   if [ $exit_code -ne 0 ]; then
     echo "error listing objects: $output"
     return 1
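With the `direct` branch removed, every helper in `s3_bucket_tests.sh` now targets the gateway endpoint at `http://127.0.0.1:7070` unconditionally. A hypothetical bats test built on these helpers might look like the sketch below (the test name, bucket name, and `source` path are illustrative, and the gateway is assumed to already be running via `setup()`):

```bash
#!/usr/bin/env bats
# Illustrative sketch only: exercises the helpers above against a running gateway.
# Assumes AWS_REGION and AWS_PROFILE are exported (e.g. via the .env file).

source "$BATS_TEST_DIRNAME/s3_bucket_tests.sh"

@test "bucket round trip through the gateway" {
  create_bucket "test-bucket-one"     # mb via http://127.0.0.1:7070
  bucket_exists "test-bucket-one"     # ls should now succeed
  delete_bucket "test-bucket-one"     # rb via the gateway
  ! bucket_exists "test-bucket-one"   # helper returns 1 once the bucket is gone
}
```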
11 changes: 2 additions & 9 deletions tests/tests.sh
@@ -40,16 +40,9 @@ setup() {
     echo "No local storage folder set"
     return 1
   fi
-  if [ -z "$DIRECT" ] || [ "$DIRECT" -eq 0 ]; then
-    direct=false
-    ROOT_ACCESS_KEY="$AWS_ACCESS_KEY_ID" ROOT_SECRET_KEY="$AWS_SECRET_ACCESS_KEY" "$VERSITY_EXE" "$BACKEND" "$LOCAL_FOLDER" &
-    versitygw_pid=$!
-  else
-    direct=true
-  fi
+  ROOT_ACCESS_KEY="$AWS_ACCESS_KEY_ID" ROOT_SECRET_KEY="$AWS_SECRET_ACCESS_KEY" "$VERSITY_EXE" "$BACKEND" "$LOCAL_FOLDER" &
+  versitygw_pid=$!
   export versitygw_pid
-  export direct
-  #echo "$VERSITYGW_TEST_ENV $AWS_ACCESS_KEY_ID $AWS_SECRET_ACCESS_KEY $VERSITY_EXE $BACKEND $DIRECT $AWS_REGION"
 }
 
 fail() {
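Since `setup()` now always launches a gateway process, the exported `versitygw_pid` is what a matching `teardown()` would reap. The actual teardown is outside this diff; a sketch of the shape it would take:

```bash
teardown() {
  # Sketch only: stop the gateway started in setup(), tolerating an early exit.
  if [ -n "$versitygw_pid" ]; then
    kill "$versitygw_pid" 2>/dev/null || true
    wait "$versitygw_pid" 2>/dev/null || true
  fi
}
```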
